public class Calib3d extends Object
Constructor and Description
Calib3d()

Modifier and Type | Method and Description
static double |
calibrateCamera(List<Mat> objectPoints,
List<Mat> imagePoints,
Size imageSize,
Mat cameraMatrix,
Mat distCoeffs,
List<Mat> rvecs,
List<Mat> tvecs) |
static double |
calibrateCamera(List<Mat> objectPoints,
List<Mat> imagePoints,
Size imageSize,
Mat cameraMatrix,
Mat distCoeffs,
List<Mat> rvecs,
List<Mat> tvecs,
int flags) |
static double |
calibrateCamera(List<Mat> objectPoints,
List<Mat> imagePoints,
Size imageSize,
Mat cameraMatrix,
Mat distCoeffs,
List<Mat> rvecs,
List<Mat> tvecs,
int flags,
TermCriteria criteria) |
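For instance, a minimal calibration sketch (the 9x6 pattern, 25 mm square size, and 640x480 image size are assumed example values, not defaults of this API; the point lists must be filled from real detections before the call succeeds):

```java
import java.util.ArrayList;
import java.util.List;
import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;

public class CalibrateSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // 3D layout of the 9x6 inner corners on the Z = 0 plane, reused per view.
        List<Point3> pts = new ArrayList<>();
        float square = 0.025f;                        // assumed 25 mm squares
        for (int y = 0; y < 6; y++)
            for (int x = 0; x < 9; x++)
                pts.add(new Point3(x * square, y * square, 0));
        MatOfPoint3f board = new MatOfPoint3f();
        board.fromList(pts);

        List<Mat> objectPoints = new ArrayList<>();   // one "board" entry per view
        List<Mat> imagePoints = new ArrayList<>();    // detected corners per view
        // ... detect corners in each calibration image (see findChessboardCorners
        //     below) and add a (board, corners) pair per successful view ...

        Mat cameraMatrix = new Mat(), distCoeffs = new Mat();
        List<Mat> rvecs = new ArrayList<>(), tvecs = new ArrayList<>();
        double rms = Calib3d.calibrateCamera(objectPoints, imagePoints,
                new Size(640, 480), cameraMatrix, distCoeffs, rvecs, tvecs);
        System.out.println("RMS reprojection error: " + rms);
    }
}
```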
static double |
calibrateCameraExtended(List<Mat> objectPoints,
List<Mat> imagePoints,
Size imageSize,
Mat cameraMatrix,
Mat distCoeffs,
List<Mat> rvecs,
List<Mat> tvecs,
Mat stdDeviationsIntrinsics,
Mat stdDeviationsExtrinsics,
Mat perViewErrors)
Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
|
static double |
calibrateCameraExtended(List<Mat> objectPoints,
List<Mat> imagePoints,
Size imageSize,
Mat cameraMatrix,
Mat distCoeffs,
List<Mat> rvecs,
List<Mat> tvecs,
Mat stdDeviationsIntrinsics,
Mat stdDeviationsExtrinsics,
Mat perViewErrors,
int flags)
Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
|
static double |
calibrateCameraExtended(List<Mat> objectPoints,
List<Mat> imagePoints,
Size imageSize,
Mat cameraMatrix,
Mat distCoeffs,
List<Mat> rvecs,
List<Mat> tvecs,
Mat stdDeviationsIntrinsics,
Mat stdDeviationsExtrinsics,
Mat perViewErrors,
int flags,
TermCriteria criteria)
Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
|
static double |
calibrateCameraRO(List<Mat> objectPoints,
List<Mat> imagePoints,
Size imageSize,
int iFixedPoint,
Mat cameraMatrix,
Mat distCoeffs,
List<Mat> rvecs,
List<Mat> tvecs,
Mat newObjPoints) |
static double |
calibrateCameraRO(List<Mat> objectPoints,
List<Mat> imagePoints,
Size imageSize,
int iFixedPoint,
Mat cameraMatrix,
Mat distCoeffs,
List<Mat> rvecs,
List<Mat> tvecs,
Mat newObjPoints,
int flags) |
static double |
calibrateCameraRO(List<Mat> objectPoints,
List<Mat> imagePoints,
Size imageSize,
int iFixedPoint,
Mat cameraMatrix,
Mat distCoeffs,
List<Mat> rvecs,
List<Mat> tvecs,
Mat newObjPoints,
int flags,
TermCriteria criteria) |
static double |
calibrateCameraROExtended(List<Mat> objectPoints,
List<Mat> imagePoints,
Size imageSize,
int iFixedPoint,
Mat cameraMatrix,
Mat distCoeffs,
List<Mat> rvecs,
List<Mat> tvecs,
Mat newObjPoints,
Mat stdDeviationsIntrinsics,
Mat stdDeviationsExtrinsics,
Mat stdDeviationsObjPoints,
Mat perViewErrors)
Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
|
static double |
calibrateCameraROExtended(List<Mat> objectPoints,
List<Mat> imagePoints,
Size imageSize,
int iFixedPoint,
Mat cameraMatrix,
Mat distCoeffs,
List<Mat> rvecs,
List<Mat> tvecs,
Mat newObjPoints,
Mat stdDeviationsIntrinsics,
Mat stdDeviationsExtrinsics,
Mat stdDeviationsObjPoints,
Mat perViewErrors,
int flags)
Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
|
static double |
calibrateCameraROExtended(List<Mat> objectPoints,
List<Mat> imagePoints,
Size imageSize,
int iFixedPoint,
Mat cameraMatrix,
Mat distCoeffs,
List<Mat> rvecs,
List<Mat> tvecs,
Mat newObjPoints,
Mat stdDeviationsIntrinsics,
Mat stdDeviationsExtrinsics,
Mat stdDeviationsObjPoints,
Mat perViewErrors,
int flags,
TermCriteria criteria)
Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
|
static void |
calibrateHandEye(List<Mat> R_gripper2base,
List<Mat> t_gripper2base,
List<Mat> R_target2cam,
List<Mat> t_target2cam,
Mat R_cam2gripper,
Mat t_cam2gripper)
Computes Hand-Eye calibration: \(_{}^{g}\textrm{T}_c\)
|
static void |
calibrateHandEye(List<Mat> R_gripper2base,
List<Mat> t_gripper2base,
List<Mat> R_target2cam,
List<Mat> t_target2cam,
Mat R_cam2gripper,
Mat t_cam2gripper,
int method)
Computes Hand-Eye calibration: \(_{}^{g}\textrm{T}_c\)
|
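A sketch of an eye-in-hand use of calibrateHandEye (how the pose lists are filled, and the choice of CALIB_HAND_EYE_TSAI, are illustrative assumptions):

```java
// Pose lists would come from robot forward kinematics (gripper -> base)
// and from solvePnP on a calibration target (target -> camera).
List<Mat> R_gripper2base = new ArrayList<>(), t_gripper2base = new ArrayList<>();
List<Mat> R_target2cam = new ArrayList<>(), t_target2cam = new ArrayList<>();
// ... add one 3x3 rotation matrix (or 3x1 rotation vector) and one 3x1
//     translation per robot station; several distinct motions are required ...
Mat R_cam2gripper = new Mat(), t_cam2gripper = new Mat();
Calib3d.calibrateHandEye(R_gripper2base, t_gripper2base,
        R_target2cam, t_target2cam,
        R_cam2gripper, t_cam2gripper,
        Calib3d.CALIB_HAND_EYE_TSAI);
```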
static void |
calibrationMatrixValues(Mat cameraMatrix,
Size imageSize,
double apertureWidth,
double apertureHeight,
double[] fovx,
double[] fovy,
double[] focalLength,
Point principalPoint,
double[] aspectRatio)
Computes useful camera characteristics from the camera matrix.
|
static boolean |
checkChessboard(Mat img,
Size size) |
static void |
composeRT(Mat rvec1,
Mat tvec1,
Mat rvec2,
Mat tvec2,
Mat rvec3,
Mat tvec3)
Combines two rotation-and-shift transformations.
|
static void |
composeRT(Mat rvec1,
Mat tvec1,
Mat rvec2,
Mat tvec2,
Mat rvec3,
Mat tvec3,
Mat dr3dr1)
Combines two rotation-and-shift transformations.
|
static void |
composeRT(Mat rvec1,
Mat tvec1,
Mat rvec2,
Mat tvec2,
Mat rvec3,
Mat tvec3,
Mat dr3dr1,
Mat dr3dt1)
Combines two rotation-and-shift transformations.
|
static void |
composeRT(Mat rvec1,
Mat tvec1,
Mat rvec2,
Mat tvec2,
Mat rvec3,
Mat tvec3,
Mat dr3dr1,
Mat dr3dt1,
Mat dr3dr2)
Combines two rotation-and-shift transformations.
|
static void |
composeRT(Mat rvec1,
Mat tvec1,
Mat rvec2,
Mat tvec2,
Mat rvec3,
Mat tvec3,
Mat dr3dr1,
Mat dr3dt1,
Mat dr3dr2,
Mat dr3dt2)
Combines two rotation-and-shift transformations.
|
static void |
composeRT(Mat rvec1,
Mat tvec1,
Mat rvec2,
Mat tvec2,
Mat rvec3,
Mat tvec3,
Mat dr3dr1,
Mat dr3dt1,
Mat dr3dr2,
Mat dr3dt2,
Mat dt3dr1)
Combines two rotation-and-shift transformations.
|
static void |
composeRT(Mat rvec1,
Mat tvec1,
Mat rvec2,
Mat tvec2,
Mat rvec3,
Mat tvec3,
Mat dr3dr1,
Mat dr3dt1,
Mat dr3dr2,
Mat dr3dt2,
Mat dt3dr1,
Mat dt3dt1)
Combines two rotation-and-shift transformations.
|
static void |
composeRT(Mat rvec1,
Mat tvec1,
Mat rvec2,
Mat tvec2,
Mat rvec3,
Mat tvec3,
Mat dr3dr1,
Mat dr3dt1,
Mat dr3dr2,
Mat dr3dt2,
Mat dt3dr1,
Mat dt3dt1,
Mat dt3dr2)
Combines two rotation-and-shift transformations.
|
static void |
composeRT(Mat rvec1,
Mat tvec1,
Mat rvec2,
Mat tvec2,
Mat rvec3,
Mat tvec3,
Mat dr3dr1,
Mat dr3dt1,
Mat dr3dr2,
Mat dr3dt2,
Mat dt3dr1,
Mat dt3dt1,
Mat dt3dr2,
Mat dt3dt2)
Combines two rotation-and-shift transformations.
|
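A small sketch showing how composeRT chains two rvec/tvec transforms (the angles and the shift are arbitrary example values):

```java
Mat rvec1 = Mat.zeros(3, 1, CvType.CV_64F);
rvec1.put(2, 0, Math.PI / 2);                 // first: rotate 90 degrees about Z
Mat tvec1 = Mat.zeros(3, 1, CvType.CV_64F);
Mat rvec2 = Mat.zeros(3, 1, CvType.CV_64F);
rvec2.put(0, 0, Math.PI / 2);                 // second: rotate 90 degrees about X
Mat tvec2 = Mat.zeros(3, 1, CvType.CV_64F);
tvec2.put(2, 0, 1.0);                          // and shift 1 unit along Z
Mat rvec3 = new Mat(), tvec3 = new Mat();
Calib3d.composeRT(rvec1, tvec1, rvec2, tvec2, rvec3, tvec3);
// rvec3/tvec3 now describe the second transform applied after the first.
```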
static void |
computeCorrespondEpilines(Mat points,
int whichImage,
Mat F,
Mat lines)
For points in an image of a stereo pair, computes the corresponding epilines in the other image.
|
static void |
convertPointsFromHomogeneous(Mat src,
Mat dst)
Converts points from homogeneous to Euclidean space.
|
static void |
convertPointsToHomogeneous(Mat src,
Mat dst)
Converts points from Euclidean to homogeneous space.
|
static void |
correctMatches(Mat F,
Mat points1,
Mat points2,
Mat newPoints1,
Mat newPoints2)
Refines coordinates of corresponding points.
|
static void |
decomposeEssentialMat(Mat E,
Mat R1,
Mat R2,
Mat t)
Decompose an essential matrix to possible rotations and translation.
|
static int |
decomposeHomographyMat(Mat H,
Mat K,
List<Mat> rotations,
List<Mat> translations,
List<Mat> normals)
Decompose a homography matrix to rotation(s), translation(s) and plane normal(s).
|
static void |
decomposeProjectionMatrix(Mat projMatrix,
Mat cameraMatrix,
Mat rotMatrix,
Mat transVect)
Decomposes a projection matrix into a rotation matrix and a camera matrix.
|
static void |
decomposeProjectionMatrix(Mat projMatrix,
Mat cameraMatrix,
Mat rotMatrix,
Mat transVect,
Mat rotMatrixX)
Decomposes a projection matrix into a rotation matrix and a camera matrix.
|
static void |
decomposeProjectionMatrix(Mat projMatrix,
Mat cameraMatrix,
Mat rotMatrix,
Mat transVect,
Mat rotMatrixX,
Mat rotMatrixY)
Decomposes a projection matrix into a rotation matrix and a camera matrix.
|
static void |
decomposeProjectionMatrix(Mat projMatrix,
Mat cameraMatrix,
Mat rotMatrix,
Mat transVect,
Mat rotMatrixX,
Mat rotMatrixY,
Mat rotMatrixZ)
Decomposes a projection matrix into a rotation matrix and a camera matrix.
|
static void |
decomposeProjectionMatrix(Mat projMatrix,
Mat cameraMatrix,
Mat rotMatrix,
Mat transVect,
Mat rotMatrixX,
Mat rotMatrixY,
Mat rotMatrixZ,
Mat eulerAngles)
Decomposes a projection matrix into a rotation matrix and a camera matrix.
|
static void |
drawChessboardCorners(Mat image,
Size patternSize,
MatOfPoint2f corners,
boolean patternWasFound)
Renders the detected chessboard corners.
|
static void |
drawFrameAxes(Mat image,
Mat cameraMatrix,
Mat distCoeffs,
Mat rvec,
Mat tvec,
float length)
Draw axes of the world/object coordinate system from pose estimation.
|
static void |
drawFrameAxes(Mat image,
Mat cameraMatrix,
Mat distCoeffs,
Mat rvec,
Mat tvec,
float length,
int thickness)
Draw axes of the world/object coordinate system from pose estimation.
|
static Mat |
estimateAffine2D(Mat from,
Mat to)
Computes an optimal affine transformation between two 2D point sets.
|
static Mat |
estimateAffine2D(Mat from,
Mat to,
Mat inliers)
Computes an optimal affine transformation between two 2D point sets.
|
static Mat |
estimateAffine2D(Mat from,
Mat to,
Mat inliers,
int method)
Computes an optimal affine transformation between two 2D point sets.
|
static Mat |
estimateAffine2D(Mat from,
Mat to,
Mat inliers,
int method,
double ransacReprojThreshold)
Computes an optimal affine transformation between two 2D point sets.
|
static Mat |
estimateAffine2D(Mat from,
Mat to,
Mat inliers,
int method,
double ransacReprojThreshold,
long maxIters)
Computes an optimal affine transformation between two 2D point sets.
|
static Mat |
estimateAffine2D(Mat from,
Mat to,
Mat inliers,
int method,
double ransacReprojThreshold,
long maxIters,
double confidence)
Computes an optimal affine transformation between two 2D point sets.
|
static Mat |
estimateAffine2D(Mat from,
Mat to,
Mat inliers,
int method,
double ransacReprojThreshold,
long maxIters,
double confidence,
long refineIters)
Computes an optimal affine transformation between two 2D point sets.
|
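A sketch of a robust estimateAffine2D call (the point sets and RANSAC settings are toy example values):

```java
MatOfPoint2f from = new MatOfPoint2f(
        new Point(0, 0), new Point(100, 0), new Point(0, 100), new Point(100, 100));
MatOfPoint2f to = new MatOfPoint2f(
        new Point(10, 20), new Point(110, 20), new Point(10, 120), new Point(110, 120));
Mat inliers = new Mat();
Mat A = Calib3d.estimateAffine2D(from, to, inliers, Calib3d.RANSAC,
        3.0,     // ransacReprojThreshold in pixels
        2000,    // maxIters
        0.99,    // confidence
        10);     // refineIters
// A is a 2x3 CV_64F matrix; inliers marks which correspondences were kept.
```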
static int |
estimateAffine3D(Mat src,
Mat dst,
Mat out,
Mat inliers)
Computes an optimal affine transformation between two 3D point sets.
|
static int |
estimateAffine3D(Mat src,
Mat dst,
Mat out,
Mat inliers,
double ransacThreshold)
Computes an optimal affine transformation between two 3D point sets.
|
static int |
estimateAffine3D(Mat src,
Mat dst,
Mat out,
Mat inliers,
double ransacThreshold,
double confidence)
Computes an optimal affine transformation between two 3D point sets.
|
static Mat |
estimateAffinePartial2D(Mat from,
Mat to)
Computes an optimal limited affine transformation with 4 degrees of freedom between
two 2D point sets.
|
static Mat |
estimateAffinePartial2D(Mat from,
Mat to,
Mat inliers)
Computes an optimal limited affine transformation with 4 degrees of freedom between
two 2D point sets.
|
static Mat |
estimateAffinePartial2D(Mat from,
Mat to,
Mat inliers,
int method)
Computes an optimal limited affine transformation with 4 degrees of freedom between
two 2D point sets.
|
static Mat |
estimateAffinePartial2D(Mat from,
Mat to,
Mat inliers,
int method,
double ransacReprojThreshold)
Computes an optimal limited affine transformation with 4 degrees of freedom between
two 2D point sets.
|
static Mat |
estimateAffinePartial2D(Mat from,
Mat to,
Mat inliers,
int method,
double ransacReprojThreshold,
long maxIters)
Computes an optimal limited affine transformation with 4 degrees of freedom between
two 2D point sets.
|
static Mat |
estimateAffinePartial2D(Mat from,
Mat to,
Mat inliers,
int method,
double ransacReprojThreshold,
long maxIters,
double confidence)
Computes an optimal limited affine transformation with 4 degrees of freedom between
two 2D point sets.
|
static Mat |
estimateAffinePartial2D(Mat from,
Mat to,
Mat inliers,
int method,
double ransacReprojThreshold,
long maxIters,
double confidence,
long refineIters)
Computes an optimal limited affine transformation with 4 degrees of freedom between
two 2D point sets.
|
static void |
filterHomographyDecompByVisibleRefpoints(List<Mat> rotations,
List<Mat> normals,
Mat beforePoints,
Mat afterPoints,
Mat possibleSolutions)
Filters homography decompositions based on additional information.
|
static void |
filterHomographyDecompByVisibleRefpoints(List<Mat> rotations,
List<Mat> normals,
Mat beforePoints,
Mat afterPoints,
Mat possibleSolutions,
Mat pointsMask)
Filters homography decompositions based on additional information.
|
static void |
filterSpeckles(Mat img,
double newVal,
int maxSpeckleSize,
double maxDiff)
Filters off small noise blobs (speckles) in the disparity map
|
static void |
filterSpeckles(Mat img,
double newVal,
int maxSpeckleSize,
double maxDiff,
Mat buf)
Filters off small noise blobs (speckles) in the disparity map
|
static boolean |
find4QuadCornerSubpix(Mat img,
Mat corners,
Size region_size) |
static boolean |
findChessboardCorners(Mat image,
Size patternSize,
MatOfPoint2f corners)
Finds the positions of internal corners of the chessboard.
|
static boolean |
findChessboardCorners(Mat image,
Size patternSize,
MatOfPoint2f corners,
int flags)
Finds the positions of internal corners of the chessboard.
|
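A sketch of a typical detection pipeline around this method ("board.png", the 9x6 pattern, and the sub-pixel refinement via Imgproc.cornerSubPix are assumed example choices):

```java
Mat gray = Imgcodecs.imread("board.png", Imgcodecs.IMREAD_GRAYSCALE);
Size patternSize = new Size(9, 6);             // inner corners per row/column
MatOfPoint2f corners = new MatOfPoint2f();
boolean found = Calib3d.findChessboardCorners(gray, patternSize, corners,
        Calib3d.CALIB_CB_ADAPTIVE_THRESH | Calib3d.CALIB_CB_NORMALIZE_IMAGE);
if (found) {
    // Refine to sub-pixel accuracy before using the corners for calibration.
    Imgproc.cornerSubPix(gray, corners, new Size(11, 11), new Size(-1, -1),
            new TermCriteria(TermCriteria.EPS + TermCriteria.COUNT, 30, 0.001));
    Mat vis = new Mat();
    Imgproc.cvtColor(gray, vis, Imgproc.COLOR_GRAY2BGR);
    Calib3d.drawChessboardCorners(vis, patternSize, corners, found);
}
```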
static boolean |
findChessboardCornersSB(Mat image,
Size patternSize,
Mat corners)
Finds the positions of internal corners of the chessboard using a sector-based approach.
|
static boolean |
findChessboardCornersSB(Mat image,
Size patternSize,
Mat corners,
int flags)
Finds the positions of internal corners of the chessboard using a sector-based approach.
|
static boolean |
findCirclesGrid(Mat image,
Size patternSize,
Mat centers) |
static boolean |
findCirclesGrid(Mat image,
Size patternSize,
Mat centers,
int flags) |
static Mat |
findEssentialMat(Mat points1,
Mat points2) |
static Mat |
findEssentialMat(Mat points1,
Mat points2,
double focal) |
static Mat |
findEssentialMat(Mat points1,
Mat points2,
double focal,
Point pp) |
static Mat |
findEssentialMat(Mat points1,
Mat points2,
double focal,
Point pp,
int method) |
static Mat |
findEssentialMat(Mat points1,
Mat points2,
double focal,
Point pp,
int method,
double prob) |
static Mat |
findEssentialMat(Mat points1,
Mat points2,
double focal,
Point pp,
int method,
double prob,
double threshold) |
static Mat |
findEssentialMat(Mat points1,
Mat points2,
double focal,
Point pp,
int method,
double prob,
double threshold,
Mat mask) |
static Mat |
findEssentialMat(Mat points1,
Mat points2,
Mat cameraMatrix)
Calculates an essential matrix from the corresponding points in two images.
|
static Mat |
findEssentialMat(Mat points1,
Mat points2,
Mat cameraMatrix,
int method)
Calculates an essential matrix from the corresponding points in two images.
|
static Mat |
findEssentialMat(Mat points1,
Mat points2,
Mat cameraMatrix,
int method,
double prob)
Calculates an essential matrix from the corresponding points in two images.
|
static Mat |
findEssentialMat(Mat points1,
Mat points2,
Mat cameraMatrix,
int method,
double prob,
double threshold)
Calculates an essential matrix from the corresponding points in two images.
|
static Mat |
findEssentialMat(Mat points1,
Mat points2,
Mat cameraMatrix,
int method,
double prob,
double threshold,
Mat mask)
Calculates an essential matrix from the corresponding points in two images.
|
static Mat |
findFundamentalMat(MatOfPoint2f points1,
MatOfPoint2f points2)
Calculates a fundamental matrix from the corresponding points in two images.
|
static Mat |
findFundamentalMat(MatOfPoint2f points1,
MatOfPoint2f points2,
int method)
Calculates a fundamental matrix from the corresponding points in two images.
|
static Mat |
findFundamentalMat(MatOfPoint2f points1,
MatOfPoint2f points2,
int method,
double ransacReprojThreshold)
Calculates a fundamental matrix from the corresponding points in two images.
|
static Mat |
findFundamentalMat(MatOfPoint2f points1,
MatOfPoint2f points2,
int method,
double ransacReprojThreshold,
double confidence)
Calculates a fundamental matrix from the corresponding points in two images.
|
static Mat |
findFundamentalMat(MatOfPoint2f points1,
MatOfPoint2f points2,
int method,
double ransacReprojThreshold,
double confidence,
Mat mask)
Calculates a fundamental matrix from the corresponding points in two images.
|
static Mat |
findHomography(MatOfPoint2f srcPoints,
MatOfPoint2f dstPoints)
Finds a perspective transformation between two planes.
|
static Mat |
findHomography(MatOfPoint2f srcPoints,
MatOfPoint2f dstPoints,
int method)
Finds a perspective transformation between two planes.
|
static Mat |
findHomography(MatOfPoint2f srcPoints,
MatOfPoint2f dstPoints,
int method,
double ransacReprojThreshold)
Finds a perspective transformation between two planes.
|
static Mat |
findHomography(MatOfPoint2f srcPoints,
MatOfPoint2f dstPoints,
int method,
double ransacReprojThreshold,
Mat mask)
Finds a perspective transformation between two planes.
|
static Mat |
findHomography(MatOfPoint2f srcPoints,
MatOfPoint2f dstPoints,
int method,
double ransacReprojThreshold,
Mat mask,
int maxIters)
Finds a perspective transformation between two planes.
|
static Mat |
findHomography(MatOfPoint2f srcPoints,
MatOfPoint2f dstPoints,
int method,
double ransacReprojThreshold,
Mat mask,
int maxIters,
double confidence)
Finds a perspective transformation between two planes.
|
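A sketch of a RANSAC findHomography call (toy correspondences; at least 4 are required):

```java
MatOfPoint2f srcPoints = new MatOfPoint2f(
        new Point(0, 0), new Point(100, 0), new Point(100, 100), new Point(0, 100));
MatOfPoint2f dstPoints = new MatOfPoint2f(
        new Point(5, 10), new Point(105, 8), new Point(108, 110), new Point(3, 112));
Mat mask = new Mat();
Mat H = Calib3d.findHomography(srcPoints, dstPoints, Calib3d.RANSAC,
        3.0,    // ransacReprojThreshold in pixels
        mask,   // per-point inlier mask (output)
        2000,   // maxIters
        0.995); // confidence
// H is 3x3 CV_64F; it is empty if no transformation could be estimated.
```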
static double |
fisheye_calibrate(List<Mat> objectPoints,
List<Mat> imagePoints,
Size image_size,
Mat K,
Mat D,
List<Mat> rvecs,
List<Mat> tvecs)
Performs camera calibration.
|
static double |
fisheye_calibrate(List<Mat> objectPoints,
List<Mat> imagePoints,
Size image_size,
Mat K,
Mat D,
List<Mat> rvecs,
List<Mat> tvecs,
int flags)
Performs camera calibration.
|
static double |
fisheye_calibrate(List<Mat> objectPoints,
List<Mat> imagePoints,
Size image_size,
Mat K,
Mat D,
List<Mat> rvecs,
List<Mat> tvecs,
int flags,
TermCriteria criteria)
Performs camera calibration.
|
static void |
fisheye_distortPoints(Mat undistorted,
Mat distorted,
Mat K,
Mat D)
Distorts 2D points using fisheye model.
|
static void |
fisheye_distortPoints(Mat undistorted,
Mat distorted,
Mat K,
Mat D,
double alpha)
Distorts 2D points using fisheye model.
|
static void |
fisheye_estimateNewCameraMatrixForUndistortRectify(Mat K,
Mat D,
Size image_size,
Mat R,
Mat P)
Estimates new camera matrix for undistortion or rectification.
|
static void |
fisheye_estimateNewCameraMatrixForUndistortRectify(Mat K,
Mat D,
Size image_size,
Mat R,
Mat P,
double balance)
Estimates new camera matrix for undistortion or rectification.
|
static void |
fisheye_estimateNewCameraMatrixForUndistortRectify(Mat K,
Mat D,
Size image_size,
Mat R,
Mat P,
double balance,
Size new_size)
Estimates new camera matrix for undistortion or rectification.
|
static void |
fisheye_estimateNewCameraMatrixForUndistortRectify(Mat K,
Mat D,
Size image_size,
Mat R,
Mat P,
double balance,
Size new_size,
double fov_scale)
Estimates new camera matrix for undistortion or rectification.
|
static void |
fisheye_initUndistortRectifyMap(Mat K,
Mat D,
Mat R,
Mat P,
Size size,
int m1type,
Mat map1,
Mat map2)
Computes undistortion and rectification maps for image transform by cv::remap().
|
static void |
fisheye_projectPoints(Mat objectPoints,
Mat imagePoints,
Mat rvec,
Mat tvec,
Mat K,
Mat D) |
static void |
fisheye_projectPoints(Mat objectPoints,
Mat imagePoints,
Mat rvec,
Mat tvec,
Mat K,
Mat D,
double alpha) |
static void |
fisheye_projectPoints(Mat objectPoints,
Mat imagePoints,
Mat rvec,
Mat tvec,
Mat K,
Mat D,
double alpha,
Mat jacobian) |
static double |
fisheye_stereoCalibrate(List<Mat> objectPoints,
List<Mat> imagePoints1,
List<Mat> imagePoints2,
Mat K1,
Mat D1,
Mat K2,
Mat D2,
Size imageSize,
Mat R,
Mat T)
Performs stereo calibration
|
static double |
fisheye_stereoCalibrate(List<Mat> objectPoints,
List<Mat> imagePoints1,
List<Mat> imagePoints2,
Mat K1,
Mat D1,
Mat K2,
Mat D2,
Size imageSize,
Mat R,
Mat T,
int flags)
Performs stereo calibration
|
static double |
fisheye_stereoCalibrate(List<Mat> objectPoints,
List<Mat> imagePoints1,
List<Mat> imagePoints2,
Mat K1,
Mat D1,
Mat K2,
Mat D2,
Size imageSize,
Mat R,
Mat T,
int flags,
TermCriteria criteria)
Performs stereo calibration
|
static void |
fisheye_stereoRectify(Mat K1,
Mat D1,
Mat K2,
Mat D2,
Size imageSize,
Mat R,
Mat tvec,
Mat R1,
Mat R2,
Mat P1,
Mat P2,
Mat Q,
int flags)
Stereo rectification for fisheye camera model
|
static void |
fisheye_stereoRectify(Mat K1,
Mat D1,
Mat K2,
Mat D2,
Size imageSize,
Mat R,
Mat tvec,
Mat R1,
Mat R2,
Mat P1,
Mat P2,
Mat Q,
int flags,
Size newImageSize)
Stereo rectification for fisheye camera model
|
static void |
fisheye_stereoRectify(Mat K1,
Mat D1,
Mat K2,
Mat D2,
Size imageSize,
Mat R,
Mat tvec,
Mat R1,
Mat R2,
Mat P1,
Mat P2,
Mat Q,
int flags,
Size newImageSize,
double balance)
Stereo rectification for fisheye camera model
|
static void |
fisheye_stereoRectify(Mat K1,
Mat D1,
Mat K2,
Mat D2,
Size imageSize,
Mat R,
Mat tvec,
Mat R1,
Mat R2,
Mat P1,
Mat P2,
Mat Q,
int flags,
Size newImageSize,
double balance,
double fov_scale)
Stereo rectification for fisheye camera model
|
static void |
fisheye_undistortImage(Mat distorted,
Mat undistorted,
Mat K,
Mat D)
Transforms an image to compensate for fisheye lens distortion.
|
static void |
fisheye_undistortImage(Mat distorted,
Mat undistorted,
Mat K,
Mat D,
Mat Knew)
Transforms an image to compensate for fisheye lens distortion.
|
static void |
fisheye_undistortImage(Mat distorted,
Mat undistorted,
Mat K,
Mat D,
Mat Knew,
Size new_size)
Transforms an image to compensate for fisheye lens distortion.
|
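A sketch combining fisheye_estimateNewCameraMatrixForUndistortRectify and fisheye_undistortImage (the intrinsics, distortion coefficients, file name, and balance below are assumed example values):

```java
Mat K = Mat.eye(3, 3, CvType.CV_64F);                 // fisheye intrinsics (assumed)
K.put(0, 0, 400); K.put(1, 1, 400);
K.put(0, 2, 640); K.put(1, 2, 360);
Mat D = Mat.zeros(4, 1, CvType.CV_64F);               // k1..k4 (assumed zero here)
Mat distorted = Imgcodecs.imread("fisheye.png");      // assumed example file
Mat Knew = new Mat();
Calib3d.fisheye_estimateNewCameraMatrixForUndistortRectify(K, D, distorted.size(),
        Mat.eye(3, 3, CvType.CV_64F), Knew, 0.0);     // balance 0 favors cropping
Mat undistorted = new Mat();
Calib3d.fisheye_undistortImage(distorted, undistorted, K, D, Knew);
```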
static void |
fisheye_undistortPoints(Mat distorted,
Mat undistorted,
Mat K,
Mat D)
Undistorts 2D points using fisheye model
|
static void |
fisheye_undistortPoints(Mat distorted,
Mat undistorted,
Mat K,
Mat D,
Mat R)
Undistorts 2D points using fisheye model
|
static void |
fisheye_undistortPoints(Mat distorted,
Mat undistorted,
Mat K,
Mat D,
Mat R,
Mat P)
Undistorts 2D points using fisheye model
|
static Mat |
getDefaultNewCameraMatrix(Mat cameraMatrix)
Returns the default new camera matrix.
|
static Mat |
getDefaultNewCameraMatrix(Mat cameraMatrix,
Size imgsize)
Returns the default new camera matrix.
|
static Mat |
getDefaultNewCameraMatrix(Mat cameraMatrix,
Size imgsize,
boolean centerPrincipalPoint)
Returns the default new camera matrix.
|
static Mat |
getOptimalNewCameraMatrix(Mat cameraMatrix,
Mat distCoeffs,
Size imageSize,
double alpha)
Returns the new camera matrix based on the free scaling parameter.
|
static Mat |
getOptimalNewCameraMatrix(Mat cameraMatrix,
Mat distCoeffs,
Size imageSize,
double alpha,
Size newImgSize)
Returns the new camera matrix based on the free scaling parameter.
|
static Mat |
getOptimalNewCameraMatrix(Mat cameraMatrix,
Mat distCoeffs,
Size imageSize,
double alpha,
Size newImgSize,
Rect validPixROI)
Returns the new camera matrix based on the free scaling parameter.
|
static Mat |
getOptimalNewCameraMatrix(Mat cameraMatrix,
Mat distCoeffs,
Size imageSize,
double alpha,
Size newImgSize,
Rect validPixROI,
boolean centerPrincipalPoint)
Returns the new camera matrix based on the free scaling parameter.
|
static Rect |
getValidDisparityROI(Rect roi1,
Rect roi2,
int minDisparity,
int numberOfDisparities,
int SADWindowSize) |
static Mat |
initCameraMatrix2D(List<MatOfPoint3f> objectPoints,
List<MatOfPoint2f> imagePoints,
Size imageSize)
Finds an initial camera matrix from 3D-2D point correspondences.
|
static Mat |
initCameraMatrix2D(List<MatOfPoint3f> objectPoints,
List<MatOfPoint2f> imagePoints,
Size imageSize,
double aspectRatio)
Finds an initial camera matrix from 3D-2D point correspondences.
|
static void |
initUndistortRectifyMap(Mat cameraMatrix,
Mat distCoeffs,
Mat R,
Mat newCameraMatrix,
Size size,
int m1type,
Mat map1,
Mat map2)
Computes the undistortion and rectification transformation map.
|
static void |
matMulDeriv(Mat A,
Mat B,
Mat dABdA,
Mat dABdB)
Computes partial derivatives of the matrix product for each multiplied matrix.
|
static void |
projectPoints(MatOfPoint3f objectPoints,
Mat rvec,
Mat tvec,
Mat cameraMatrix,
MatOfDouble distCoeffs,
MatOfPoint2f imagePoints)
Projects 3D points to an image plane.
|
static void |
projectPoints(MatOfPoint3f objectPoints,
Mat rvec,
Mat tvec,
Mat cameraMatrix,
MatOfDouble distCoeffs,
MatOfPoint2f imagePoints,
Mat jacobian)
Projects 3D points to an image plane.
|
static void |
projectPoints(MatOfPoint3f objectPoints,
Mat rvec,
Mat tvec,
Mat cameraMatrix,
MatOfDouble distCoeffs,
MatOfPoint2f imagePoints,
Mat jacobian,
double aspectRatio)
Projects 3D points to an image plane.
|
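A sketch of projectPoints with an identity pose and assumed example intrinsics (an empty MatOfDouble means no distortion):

```java
// Two 3D points one metre in front of the camera.
MatOfPoint3f objectPoints = new MatOfPoint3f(
        new Point3(0, 0, 1), new Point3(0.1, 0, 1));
Mat rvec = Mat.zeros(3, 1, CvType.CV_64F);   // no rotation
Mat tvec = Mat.zeros(3, 1, CvType.CV_64F);   // no translation
Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);
cameraMatrix.put(0, 0, 800); cameraMatrix.put(1, 1, 800);   // fx, fy (assumed)
cameraMatrix.put(0, 2, 320); cameraMatrix.put(1, 2, 240);   // cx, cy (assumed)
MatOfPoint2f imagePoints = new MatOfPoint2f();
Calib3d.projectPoints(objectPoints, rvec, tvec, cameraMatrix,
        new MatOfDouble(), imagePoints);     // empty distCoeffs = no distortion
// The first point lands at the principal point (320, 240).
```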
static int |
recoverPose(Mat E,
Mat points1,
Mat points2,
Mat R,
Mat t) |
static int |
recoverPose(Mat E,
Mat points1,
Mat points2,
Mat R,
Mat t,
double focal) |
static int |
recoverPose(Mat E,
Mat points1,
Mat points2,
Mat R,
Mat t,
double focal,
Point pp) |
static int |
recoverPose(Mat E,
Mat points1,
Mat points2,
Mat R,
Mat t,
double focal,
Point pp,
Mat mask) |
static int |
recoverPose(Mat E,
Mat points1,
Mat points2,
Mat cameraMatrix,
Mat R,
Mat t)
Recover relative camera rotation and translation from an estimated essential matrix and the
corresponding points in two images, using cheirality check.
|
static int |
recoverPose(Mat E,
Mat points1,
Mat points2,
Mat cameraMatrix,
Mat R,
Mat t,
double distanceThresh) |
static int |
recoverPose(Mat E,
Mat points1,
Mat points2,
Mat cameraMatrix,
Mat R,
Mat t,
double distanceThresh,
Mat mask) |
static int |
recoverPose(Mat E,
Mat points1,
Mat points2,
Mat cameraMatrix,
Mat R,
Mat t,
double distanceThresh,
Mat mask,
Mat triangulatedPoints) |
static int |
recoverPose(Mat E,
Mat points1,
Mat points2,
Mat cameraMatrix,
Mat R,
Mat t,
Mat mask)
Recover relative camera rotation and translation from an estimated essential matrix and the
corresponding points in two images, using cheirality check.
|
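A sketch chaining findEssentialMat and recoverPose (the camera matrix is a placeholder, and the point sets would come from feature matching):

```java
MatOfPoint2f points1 = new MatOfPoint2f(/* >= 5 matched points, view 1 */);
MatOfPoint2f points2 = new MatOfPoint2f(/* >= 5 matched points, view 2 */);
Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);   // replace with real intrinsics
Mat mask = new Mat();
Mat E = Calib3d.findEssentialMat(points1, points2, cameraMatrix,
        Calib3d.RANSAC, 0.999, 1.0, mask);
Mat R = new Mat(), t = new Mat();
// Passing the RANSAC mask lets only inlier correspondences vote in the
// cheirality check; t is recovered only up to scale.
int good = Calib3d.recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
```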
static float |
rectify3Collinear(Mat cameraMatrix1,
Mat distCoeffs1,
Mat cameraMatrix2,
Mat distCoeffs2,
Mat cameraMatrix3,
Mat distCoeffs3,
List<Mat> imgpt1,
List<Mat> imgpt3,
Size imageSize,
Mat R12,
Mat T12,
Mat R13,
Mat T13,
Mat R1,
Mat R2,
Mat R3,
Mat P1,
Mat P2,
Mat P3,
Mat Q,
double alpha,
Size newImgSize,
Rect roi1,
Rect roi2,
int flags) |
static void |
reprojectImageTo3D(Mat disparity,
Mat _3dImage,
Mat Q)
Reprojects a disparity image to 3D space.
|
static void |
reprojectImageTo3D(Mat disparity,
Mat _3dImage,
Mat Q,
boolean handleMissingValues)
Reprojects a disparity image to 3D space.
|
static void |
reprojectImageTo3D(Mat disparity,
Mat _3dImage,
Mat Q,
boolean handleMissingValues,
int ddepth)
Reprojects a disparity image to 3D space.
|
static void |
Rodrigues(Mat src,
Mat dst)
Converts a rotation matrix to a rotation vector or vice versa.
|
static void |
Rodrigues(Mat src,
Mat dst,
Mat jacobian)
Converts a rotation matrix to a rotation vector or vice versa.
|
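A sketch of a Rodrigues round trip between the vector and matrix forms:

```java
Mat rvec = Mat.zeros(3, 1, CvType.CV_64F);
rvec.put(1, 0, Math.PI / 2);         // 90 degrees about the Y axis
Mat R = new Mat();
Calib3d.Rodrigues(rvec, R);          // 3x1 vector -> 3x3 rotation matrix
Mat rvecBack = new Mat();
Calib3d.Rodrigues(R, rvecBack);      // and back again
```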
static double[] |
RQDecomp3x3(Mat src,
Mat mtxR,
Mat mtxQ)
Computes an RQ decomposition of 3x3 matrices.
|
static double[] |
RQDecomp3x3(Mat src,
Mat mtxR,
Mat mtxQ,
Mat Qx)
Computes an RQ decomposition of 3x3 matrices.
|
static double[] |
RQDecomp3x3(Mat src,
Mat mtxR,
Mat mtxQ,
Mat Qx,
Mat Qy)
Computes an RQ decomposition of 3x3 matrices.
|
static double[] |
RQDecomp3x3(Mat src,
Mat mtxR,
Mat mtxQ,
Mat Qx,
Mat Qy,
Mat Qz)
Computes an RQ decomposition of 3x3 matrices.
|
static double |
sampsonDistance(Mat pt1,
Mat pt2,
Mat F)
Calculates the Sampson Distance between two points.
|
static int |
solveP3P(Mat objectPoints,
Mat imagePoints,
Mat cameraMatrix,
Mat distCoeffs,
List<Mat> rvecs,
List<Mat> tvecs,
int flags)
Finds an object pose from 3 3D-2D point correspondences.
|
static boolean |
solvePnP(MatOfPoint3f objectPoints,
MatOfPoint2f imagePoints,
Mat cameraMatrix,
MatOfDouble distCoeffs,
Mat rvec,
Mat tvec)
Finds an object pose from 3D-2D point correspondences.
|
static boolean |
solvePnP(MatOfPoint3f objectPoints,
MatOfPoint2f imagePoints,
Mat cameraMatrix,
MatOfDouble distCoeffs,
Mat rvec,
Mat tvec,
boolean useExtrinsicGuess)
Finds an object pose from 3D-2D point correspondences.
|
static boolean |
solvePnP(MatOfPoint3f objectPoints,
MatOfPoint2f imagePoints,
Mat cameraMatrix,
MatOfDouble distCoeffs,
Mat rvec,
Mat tvec,
boolean useExtrinsicGuess,
int flags)
Finds an object pose from 3D-2D point correspondences.
|
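A sketch of solvePnP for a planar 10 cm square (the object corners, image corners, and intrinsics are consistent toy values, so the expected solution is roughly tvec = (0, 0, 1)):

```java
MatOfPoint3f objectPoints = new MatOfPoint3f(
        new Point3(-0.05, 0.05, 0), new Point3(0.05, 0.05, 0),
        new Point3(0.05, -0.05, 0), new Point3(-0.05, -0.05, 0));
MatOfPoint2f imagePoints = new MatOfPoint2f(
        new Point(280, 280), new Point(360, 280),
        new Point(360, 200), new Point(280, 200));
Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);
cameraMatrix.put(0, 0, 800); cameraMatrix.put(1, 1, 800);   // fx, fy (assumed)
cameraMatrix.put(0, 2, 320); cameraMatrix.put(1, 2, 240);   // cx, cy (assumed)
MatOfDouble distCoeffs = new MatOfDouble();                  // no distortion
Mat rvec = new Mat(), tvec = new Mat();
boolean ok = Calib3d.solvePnP(objectPoints, imagePoints,
        cameraMatrix, distCoeffs, rvec, tvec);
if (ok) {
    // Optional polish of the initial solution.
    Calib3d.solvePnPRefineLM(objectPoints, imagePoints,
            cameraMatrix, distCoeffs, rvec, tvec);
}
```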
static int |
solvePnPGeneric(Mat objectPoints,
Mat imagePoints,
Mat cameraMatrix,
Mat distCoeffs,
List<Mat> rvecs,
List<Mat> tvecs)
Finds an object pose from 3D-2D point correspondences.
|
static int |
solvePnPGeneric(Mat objectPoints,
Mat imagePoints,
Mat cameraMatrix,
Mat distCoeffs,
List<Mat> rvecs,
List<Mat> tvecs,
boolean useExtrinsicGuess)
Finds an object pose from 3D-2D point correspondences.
|
static int |
solvePnPGeneric(Mat objectPoints,
Mat imagePoints,
Mat cameraMatrix,
Mat distCoeffs,
List<Mat> rvecs,
List<Mat> tvecs,
boolean useExtrinsicGuess,
int flags)
Finds an object pose from 3D-2D point correspondences.
|
static int |
solvePnPGeneric(Mat objectPoints,
Mat imagePoints,
Mat cameraMatrix,
Mat distCoeffs,
List<Mat> rvecs,
List<Mat> tvecs,
boolean useExtrinsicGuess,
int flags,
Mat rvec)
Finds an object pose from 3D-2D point correspondences.
|
static int |
solvePnPGeneric(Mat objectPoints,
Mat imagePoints,
Mat cameraMatrix,
Mat distCoeffs,
List<Mat> rvecs,
List<Mat> tvecs,
boolean useExtrinsicGuess,
int flags,
Mat rvec,
Mat tvec)
Finds an object pose from 3D-2D point correspondences.
|
static int |
solvePnPGeneric(Mat objectPoints,
Mat imagePoints,
Mat cameraMatrix,
Mat distCoeffs,
List<Mat> rvecs,
List<Mat> tvecs,
boolean useExtrinsicGuess,
int flags,
Mat rvec,
Mat tvec,
Mat reprojectionError)
Finds an object pose from 3D-2D point correspondences.
|
static boolean |
solvePnPRansac(MatOfPoint3f objectPoints,
MatOfPoint2f imagePoints,
Mat cameraMatrix,
MatOfDouble distCoeffs,
Mat rvec,
Mat tvec)
Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
|
static boolean |
solvePnPRansac(MatOfPoint3f objectPoints,
MatOfPoint2f imagePoints,
Mat cameraMatrix,
MatOfDouble distCoeffs,
Mat rvec,
Mat tvec,
boolean useExtrinsicGuess)
Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
|
static boolean |
solvePnPRansac(MatOfPoint3f objectPoints,
MatOfPoint2f imagePoints,
Mat cameraMatrix,
MatOfDouble distCoeffs,
Mat rvec,
Mat tvec,
boolean useExtrinsicGuess,
int iterationsCount)
Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
|
static boolean |
solvePnPRansac(MatOfPoint3f objectPoints,
MatOfPoint2f imagePoints,
Mat cameraMatrix,
MatOfDouble distCoeffs,
Mat rvec,
Mat tvec,
boolean useExtrinsicGuess,
int iterationsCount,
float reprojectionError)
Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
|
static boolean |
solvePnPRansac(MatOfPoint3f objectPoints,
MatOfPoint2f imagePoints,
Mat cameraMatrix,
MatOfDouble distCoeffs,
Mat rvec,
Mat tvec,
boolean useExtrinsicGuess,
int iterationsCount,
float reprojectionError,
double confidence)
Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
|
static boolean |
solvePnPRansac(MatOfPoint3f objectPoints,
MatOfPoint2f imagePoints,
Mat cameraMatrix,
MatOfDouble distCoeffs,
Mat rvec,
Mat tvec,
boolean useExtrinsicGuess,
int iterationsCount,
float reprojectionError,
double confidence,
Mat inliers)
Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
|
static boolean |
solvePnPRansac(MatOfPoint3f objectPoints,
MatOfPoint2f imagePoints,
Mat cameraMatrix,
MatOfDouble distCoeffs,
Mat rvec,
Mat tvec,
boolean useExtrinsicGuess,
int iterationsCount,
float reprojectionError,
double confidence,
Mat inliers,
int flags)
Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
|
static void |
solvePnPRefineLM(Mat objectPoints,
Mat imagePoints,
Mat cameraMatrix,
Mat distCoeffs,
Mat rvec,
Mat tvec)
Refines a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
|
static void |
solvePnPRefineLM(Mat objectPoints,
Mat imagePoints,
Mat cameraMatrix,
Mat distCoeffs,
Mat rvec,
Mat tvec,
TermCriteria criteria)
Refines a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
|
static void |
solvePnPRefineVVS(Mat objectPoints,
Mat imagePoints,
Mat cameraMatrix,
Mat distCoeffs,
Mat rvec,
Mat tvec)
Refines a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
|
static void |
solvePnPRefineVVS(Mat objectPoints,
Mat imagePoints,
Mat cameraMatrix,
Mat distCoeffs,
Mat rvec,
Mat tvec,
TermCriteria criteria)
Refines a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
|
static void |
solvePnPRefineVVS(Mat objectPoints,
Mat imagePoints,
Mat cameraMatrix,
Mat distCoeffs,
Mat rvec,
Mat tvec,
TermCriteria criteria,
double VVSlambda)
Refines a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
|
static double |
stereoCalibrate(List<Mat> objectPoints,
List<Mat> imagePoints1,
List<Mat> imagePoints2,
Mat cameraMatrix1,
Mat distCoeffs1,
Mat cameraMatrix2,
Mat distCoeffs2,
Size imageSize,
Mat R,
Mat T,
Mat E,
Mat F) |
static double |
stereoCalibrate(List<Mat> objectPoints,
List<Mat> imagePoints1,
List<Mat> imagePoints2,
Mat cameraMatrix1,
Mat distCoeffs1,
Mat cameraMatrix2,
Mat distCoeffs2,
Size imageSize,
Mat R,
Mat T,
Mat E,
Mat F,
int flags) |
static double |
stereoCalibrate(List<Mat> objectPoints,
List<Mat> imagePoints1,
List<Mat> imagePoints2,
Mat cameraMatrix1,
Mat distCoeffs1,
Mat cameraMatrix2,
Mat distCoeffs2,
Size imageSize,
Mat R,
Mat T,
Mat E,
Mat F,
int flags,
TermCriteria criteria) |
static double |
stereoCalibrateExtended(List<Mat> objectPoints,
List<Mat> imagePoints1,
List<Mat> imagePoints2,
Mat cameraMatrix1,
Mat distCoeffs1,
Mat cameraMatrix2,
Mat distCoeffs2,
Size imageSize,
Mat R,
Mat T,
Mat E,
Mat F,
Mat perViewErrors)
Calibrates the stereo camera.
|
static double |
stereoCalibrateExtended(List<Mat> objectPoints,
List<Mat> imagePoints1,
List<Mat> imagePoints2,
Mat cameraMatrix1,
Mat distCoeffs1,
Mat cameraMatrix2,
Mat distCoeffs2,
Size imageSize,
Mat R,
Mat T,
Mat E,
Mat F,
Mat perViewErrors,
int flags)
Calibrates the stereo camera.
|
static double |
stereoCalibrateExtended(List<Mat> objectPoints,
List<Mat> imagePoints1,
List<Mat> imagePoints2,
Mat cameraMatrix1,
Mat distCoeffs1,
Mat cameraMatrix2,
Mat distCoeffs2,
Size imageSize,
Mat R,
Mat T,
Mat E,
Mat F,
Mat perViewErrors,
int flags,
TermCriteria criteria)
Calibrates the stereo camera.
|
static void |
stereoRectify(Mat cameraMatrix1,
Mat distCoeffs1,
Mat cameraMatrix2,
Mat distCoeffs2,
Size imageSize,
Mat R,
Mat T,
Mat R1,
Mat R2,
Mat P1,
Mat P2,
Mat Q)
Computes rectification transforms for each head of a calibrated stereo camera.
|
static void |
stereoRectify(Mat cameraMatrix1,
Mat distCoeffs1,
Mat cameraMatrix2,
Mat distCoeffs2,
Size imageSize,
Mat R,
Mat T,
Mat R1,
Mat R2,
Mat P1,
Mat P2,
Mat Q,
int flags)
Computes rectification transforms for each head of a calibrated stereo camera.
|
static void |
stereoRectify(Mat cameraMatrix1,
Mat distCoeffs1,
Mat cameraMatrix2,
Mat distCoeffs2,
Size imageSize,
Mat R,
Mat T,
Mat R1,
Mat R2,
Mat P1,
Mat P2,
Mat Q,
int flags,
double alpha)
Computes rectification transforms for each head of a calibrated stereo camera.
|
static void |
stereoRectify(Mat cameraMatrix1,
Mat distCoeffs1,
Mat cameraMatrix2,
Mat distCoeffs2,
Size imageSize,
Mat R,
Mat T,
Mat R1,
Mat R2,
Mat P1,
Mat P2,
Mat Q,
int flags,
double alpha,
Size newImageSize)
Computes rectification transforms for each head of a calibrated stereo camera.
|
static void |
stereoRectify(Mat cameraMatrix1,
Mat distCoeffs1,
Mat cameraMatrix2,
Mat distCoeffs2,
Size imageSize,
Mat R,
Mat T,
Mat R1,
Mat R2,
Mat P1,
Mat P2,
Mat Q,
int flags,
double alpha,
Size newImageSize,
Rect validPixROI1)
Computes rectification transforms for each head of a calibrated stereo camera.
|
static void |
stereoRectify(Mat cameraMatrix1,
Mat distCoeffs1,
Mat cameraMatrix2,
Mat distCoeffs2,
Size imageSize,
Mat R,
Mat T,
Mat R1,
Mat R2,
Mat P1,
Mat P2,
Mat Q,
int flags,
double alpha,
Size newImageSize,
Rect validPixROI1,
Rect validPixROI2)
Computes rectification transforms for each head of a calibrated stereo camera.
|
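A sketch of the usual rectification pipeline, stereoRectify followed by initUndistortRectifyMap and Imgproc.remap (all inputs below are placeholders standing in for the outputs of a prior stereoCalibrate call):

```java
Mat cameraMatrix1 = new Mat(), distCoeffs1 = new Mat();   // from stereoCalibrate
Mat cameraMatrix2 = new Mat(), distCoeffs2 = new Mat();   // (placeholders here)
Mat R = new Mat(), T = new Mat();                          // inter-camera pose
Size imageSize = new Size(1280, 720);                      // assumed example size
Mat R1 = new Mat(), R2 = new Mat(), P1 = new Mat(), P2 = new Mat(), Q = new Mat();
Calib3d.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
        imageSize, R, T, R1, R2, P1, P2, Q,
        Calib3d.CALIB_ZERO_DISPARITY, 0 /* alpha: crop to valid pixels */, imageSize);
Mat map1 = new Mat(), map2 = new Mat();
Calib3d.initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1,
        imageSize, CvType.CV_32FC1, map1, map2);
Mat leftImage = Imgcodecs.imread("left.png");              // assumed example file
Mat leftRectified = new Mat();
Imgproc.remap(leftImage, leftRectified, map1, map2, Imgproc.INTER_LINEAR);
```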
static boolean |
stereoRectifyUncalibrated(Mat points1,
Mat points2,
Mat F,
Size imgSize,
Mat H1,
Mat H2)
Computes a rectification transform for an uncalibrated stereo camera.
|
static boolean |
stereoRectifyUncalibrated(Mat points1,
Mat points2,
Mat F,
Size imgSize,
Mat H1,
Mat H2,
double threshold)
Computes a rectification transform for an uncalibrated stereo camera.
|
static void |
triangulatePoints(Mat projMatr1,
Mat projMatr2,
Mat projPoints1,
Mat projPoints2,
Mat points4D)
Reconstructs points by triangulation.
|
static void |
undistort(Mat src,
Mat dst,
Mat cameraMatrix,
Mat distCoeffs)
Transforms an image to compensate for lens distortion.
|
static void |
undistort(Mat src,
Mat dst,
Mat cameraMatrix,
Mat distCoeffs,
Mat newCameraMatrix)
Transforms an image to compensate for lens distortion.
|
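A sketch pairing getOptimalNewCameraMatrix (alpha = 1 retains all source pixels) with undistort (the intrinsics and distortion coefficients are assumed example values):

```java
Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);    // replace with calibrated values
cameraMatrix.put(0, 0, 800); cameraMatrix.put(1, 1, 800);
cameraMatrix.put(0, 2, 320); cameraMatrix.put(1, 2, 240);
MatOfDouble distCoeffs = new MatOfDouble(-0.3, 0.1, 0, 0, 0);  // assumed k1, k2, p1, p2, k3
Mat src = Imgcodecs.imread("input.png");            // assumed example file
Rect validPixROI = new Rect();                      // receives the all-good-pixels region
Mat newCameraMatrix = Calib3d.getOptimalNewCameraMatrix(
        cameraMatrix, distCoeffs, src.size(), 1.0, src.size(), validPixROI);
Mat dst = new Mat();
Calib3d.undistort(src, dst, cameraMatrix, distCoeffs, newCameraMatrix);
// Cropping dst to validPixROI removes the curved black borders.
```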
static void |
undistortPoints(MatOfPoint2f src,
MatOfPoint2f dst,
Mat cameraMatrix,
Mat distCoeffs)
Computes the ideal point coordinates from the observed point coordinates.
|
static void |
undistortPoints(MatOfPoint2f src,
MatOfPoint2f dst,
Mat cameraMatrix,
Mat distCoeffs,
Mat R)
Computes the ideal point coordinates from the observed point coordinates.
|
static void |
undistortPoints(MatOfPoint2f src,
MatOfPoint2f dst,
Mat cameraMatrix,
Mat distCoeffs,
Mat R,
Mat P)
Computes the ideal point coordinates from the observed point coordinates.
|
static void |
undistortPointsIter(Mat src,
Mat dst,
Mat cameraMatrix,
Mat distCoeffs,
Mat R,
Mat P,
TermCriteria criteria)
Note: the default version of #undistortPoints performs 5 iterations to compute the undistorted points.
|
static void |
validateDisparity(Mat disparity,
Mat cost,
int minDisparity,
int numberOfDisparities) |
static void |
validateDisparity(Mat disparity,
Mat cost,
int minDisparity,
int numberOfDisparities,
int disp12MaxDisp) |
public static final int SOLVEPNP_ITERATIVE
public static final int SOLVEPNP_EPNP
public static final int SOLVEPNP_P3P
public static final int SOLVEPNP_DLS
public static final int SOLVEPNP_UPNP
public static final int SOLVEPNP_AP3P
public static final int SOLVEPNP_IPPE
public static final int SOLVEPNP_IPPE_SQUARE
public static final int SOLVEPNP_MAX_COUNT
public static final int CirclesGridFinderParameters_SYMMETRIC_GRID
public static final int CirclesGridFinderParameters_ASYMMETRIC_GRID
public static final int CALIB_HAND_EYE_TSAI
public static final int CALIB_HAND_EYE_PARK
public static final int CALIB_HAND_EYE_HORAUD
public static final int CALIB_HAND_EYE_ANDREFF
public static final int CALIB_HAND_EYE_DANIILIDIS
public static final int CV_ITERATIVE
public static final int CV_EPNP
public static final int CV_P3P
public static final int CV_DLS
public static final int CvLevMarq_DONE
public static final int CvLevMarq_STARTED
public static final int CvLevMarq_CALC_J
public static final int CvLevMarq_CHECK_ERR
public static final int LMEDS
public static final int RANSAC
public static final int RHO
public static final int CALIB_CB_ADAPTIVE_THRESH
public static final int CALIB_CB_NORMALIZE_IMAGE
public static final int CALIB_CB_FILTER_QUADS
public static final int CALIB_CB_FAST_CHECK
public static final int CALIB_CB_EXHAUSTIVE
public static final int CALIB_CB_ACCURACY
public static final int CALIB_CB_SYMMETRIC_GRID
public static final int CALIB_CB_ASYMMETRIC_GRID
public static final int CALIB_CB_CLUSTERING
public static final int CALIB_NINTRINSIC
public static final int CALIB_USE_INTRINSIC_GUESS
public static final int CALIB_FIX_ASPECT_RATIO
public static final int CALIB_FIX_PRINCIPAL_POINT
public static final int CALIB_ZERO_TANGENT_DIST
public static final int CALIB_FIX_FOCAL_LENGTH
public static final int CALIB_FIX_K1
public static final int CALIB_FIX_K2
public static final int CALIB_FIX_K3
public static final int CALIB_FIX_K4
public static final int CALIB_FIX_K5
public static final int CALIB_FIX_K6
public static final int CALIB_RATIONAL_MODEL
public static final int CALIB_THIN_PRISM_MODEL
public static final int CALIB_FIX_S1_S2_S3_S4
public static final int CALIB_TILTED_MODEL
public static final int CALIB_FIX_TAUX_TAUY
public static final int CALIB_USE_QR
public static final int CALIB_FIX_TANGENT_DIST
public static final int CALIB_FIX_INTRINSIC
public static final int CALIB_SAME_FOCAL_LENGTH
public static final int CALIB_ZERO_DISPARITY
public static final int CALIB_USE_LU
public static final int CALIB_USE_EXTRINSIC_GUESS
public static final int FM_7POINT
public static final int FM_8POINT
public static final int FM_LMEDS
public static final int FM_RANSAC
public static final int fisheye_CALIB_USE_INTRINSIC_GUESS
public static final int fisheye_CALIB_RECOMPUTE_EXTRINSIC
public static final int fisheye_CALIB_CHECK_COND
public static final int fisheye_CALIB_FIX_SKEW
public static final int fisheye_CALIB_FIX_K1
public static final int fisheye_CALIB_FIX_K2
public static final int fisheye_CALIB_FIX_K3
public static final int fisheye_CALIB_FIX_K4
public static final int fisheye_CALIB_FIX_INTRINSIC
public static final int fisheye_CALIB_FIX_PRINCIPAL_POINT
public static final int PROJ_SPHERICAL_ORTHO
public static final int PROJ_SPHERICAL_EQRECT
public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters, double confidence, long refineIters)
from - First input 2D point set containing \((X,Y)\).
to - Second input 2D point set containing \((x,y)\).
inliers - Output vector indicating which points are inliers (1-inlier, 0-outlier).
method - Robust method used to compute transformation. The following methods are possible: RANSAC (default), LMEDS.
ransacReprojThreshold - Maximum reprojection error in the RANSAC algorithm to consider a point as an inlier. Applies only to RANSAC.
maxIters - The maximum number of robust method iterations.
confidence - Confidence level, between 0 and 1, for the estimated transformation. Anything between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
refineIters - Maximum number of iterations of the refining algorithm (Levenberg-Marquardt). Passing 0 will disable refining, so the output matrix will be the output of the robust method.
public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters, double confidence)
from - First input 2D point set containing \((X,Y)\).
to - Second input 2D point set containing \((x,y)\).
inliers - Output vector indicating which points are inliers (1-inlier, 0-outlier).
method - Robust method used to compute transformation. The following methods are possible: RANSAC (default), LMEDS.
ransacReprojThreshold - Maximum reprojection error in the RANSAC algorithm to consider a point as an inlier. Applies only to RANSAC.
maxIters - The maximum number of robust method iterations.
confidence - Confidence level, between 0 and 1, for the estimated transformation. Anything between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters)
from - First input 2D point set containing \((X,Y)\).
to - Second input 2D point set containing \((x,y)\).
inliers - Output vector indicating which points are inliers (1-inlier, 0-outlier).
method - Robust method used to compute transformation. The following methods are possible: RANSAC (default), LMEDS.
ransacReprojThreshold - Maximum reprojection error in the RANSAC algorithm to consider a point as an inlier. Applies only to RANSAC.
maxIters - The maximum number of robust method iterations.
public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold)
from - First input 2D point set containing \((X,Y)\).
to - Second input 2D point set containing \((x,y)\).
inliers - Output vector indicating which points are inliers (1-inlier, 0-outlier).
method - Robust method used to compute transformation. The following methods are possible: RANSAC (default), LMEDS.
ransacReprojThreshold - Maximum reprojection error in the RANSAC algorithm to consider a point as an inlier. Applies only to RANSAC.
public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers, int method)
from - First input 2D point set containing \((X,Y)\).
to - Second input 2D point set containing \((x,y)\).
inliers - Output vector indicating which points are inliers (1-inlier, 0-outlier).
method - Robust method used to compute transformation. The following methods are possible: RANSAC (default), LMEDS.
public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers)
from - First input 2D point set containing \((X,Y)\).
to - Second input 2D point set containing \((x,y)\).
inliers - Output vector indicating which points are inliers (1-inlier, 0-outlier).
public static Mat estimateAffine2D(Mat from, Mat to)
from - First input 2D point set containing \((X,Y)\).
to - Second input 2D point set containing \((x,y)\).
public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters, double confidence, long refineIters)
from - First input 2D point set.
to - Second input 2D point set.
inliers - Output vector indicating which points are inliers.
method - Robust method used to compute transformation. The following methods are possible: RANSAC (default), LMEDS.
ransacReprojThreshold - Maximum reprojection error in the RANSAC algorithm to consider a point as an inlier. Applies only to RANSAC.
maxIters - The maximum number of robust method iterations.
confidence - Confidence level, between 0 and 1, for the estimated transformation. Anything between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
refineIters - Maximum number of iterations of the refining algorithm (Levenberg-Marquardt). Passing 0 will disable refining, so the output matrix will be the output of the robust method.
public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters, double confidence)
from - First input 2D point set.
to - Second input 2D point set.
inliers - Output vector indicating which points are inliers.
method - Robust method used to compute transformation. The following methods are possible: RANSAC (default), LMEDS.
ransacReprojThreshold - Maximum reprojection error in the RANSAC algorithm to consider a point as an inlier. Applies only to RANSAC.
maxIters - The maximum number of robust method iterations.
confidence - Confidence level, between 0 and 1, for the estimated transformation. Anything between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters)
from - First input 2D point set.
to - Second input 2D point set.
inliers - Output vector indicating which points are inliers.
method - Robust method used to compute transformation. The following methods are possible: RANSAC (default), LMEDS.
ransacReprojThreshold - Maximum reprojection error in the RANSAC algorithm to consider a point as an inlier. Applies only to RANSAC.
maxIters - The maximum number of robust method iterations.
public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold)
from - First input 2D point set.
to - Second input 2D point set.
inliers - Output vector indicating which points are inliers.
method - Robust method used to compute transformation. The following methods are possible: RANSAC (default), LMEDS.
ransacReprojThreshold - Maximum reprojection error in the RANSAC algorithm to consider a point as an inlier. Applies only to RANSAC.
public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers, int method)
from - First input 2D point set.
to - Second input 2D point set.
inliers - Output vector indicating which points are inliers.
method - Robust method used to compute transformation. The following methods are possible: RANSAC (default), LMEDS.
public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers)
from - First input 2D point set.
to - Second input 2D point set.
inliers - Output vector indicating which points are inliers.
public static Mat estimateAffinePartial2D(Mat from, Mat to)
from - First input 2D point set.
to - Second input 2D point set.
public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method, double prob, double threshold, Mat mask)
points1 - Array of N (N >= 5) 2D points from the first image. The point coordinates should be floating-point (single or double precision).
points2 - Array of the second image points of the same size and format as points1.
cameraMatrix - Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\). Note that this function assumes that points1 and points2 are feature points from cameras with the same camera matrix.
method - Method for computing an essential matrix.
prob - Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of confidence (probability) that the estimated matrix is correct.
threshold - Parameter used for RANSAC. It is the maximum distance from a point to an epipolar line in pixels, beyond which the point is considered an outlier and is not used for computing the final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the point localization, image resolution, and the image noise.
mask - Output array of N elements, every element of which is set to 0 for outliers and to 1 for the other points. The array is computed only in the RANSAC and LMedS methods.
public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method, double prob, double threshold)
points1 - Array of N (N >= 5) 2D points from the first image. The point coordinates should be floating-point (single or double precision).
points2 - Array of the second image points of the same size and format as points1.
cameraMatrix - Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\). Note that this function assumes that points1 and points2 are feature points from cameras with the same camera matrix.
method - Method for computing an essential matrix.
prob - Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of confidence (probability) that the estimated matrix is correct.
threshold - Parameter used for RANSAC. It is the maximum distance from a point to an epipolar line in pixels, beyond which the point is considered an outlier and is not used for computing the final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the point localization, image resolution, and the image noise.
public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method, double prob)
points1 - Array of N (N >= 5) 2D points from the first image. The point coordinates should be floating-point (single or double precision).
points2 - Array of the second image points of the same size and format as points1.
cameraMatrix - Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\). Note that this function assumes that points1 and points2 are feature points from cameras with the same camera matrix.
method - Method for computing an essential matrix.
prob - Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of confidence (probability) that the estimated matrix is correct.
public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method)
points1 - Array of N (N >= 5) 2D points from the first image. The point coordinates should be floating-point (single or double precision).
points2 - Array of the second image points of the same size and format as points1.
cameraMatrix - Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\). Note that this function assumes that points1 and points2 are feature points from cameras with the same camera matrix.
method - Method for computing an essential matrix.
public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix)
points1 - Array of N (N >= 5) 2D points from the first image. The point coordinates should be floating-point (single or double precision).
points2 - Array of the second image points of the same size and format as points1.
cameraMatrix - Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\). Note that this function assumes that points1 and points2 are feature points from cameras with the same camera matrix.
public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp, int method, double prob, double threshold, Mat mask)
points1 - Array of N (N >= 5) 2D points from the first image. The point coordinates should be floating-point (single or double precision).
points2 - Array of the second image points of the same size and format as points1.
focal - focal length of the camera. Note that this function assumes that points1 and points2 are feature points from cameras with same focal length and principal point.
pp - principal point of the camera.
method - Method for computing a fundamental matrix.
threshold - Parameter used for RANSAC. It is the maximum distance from a point to an epipolar line in pixels, beyond which the point is considered an outlier and is not used for computing the final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the point localization, image resolution, and the image noise.
prob - Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of confidence (probability) that the estimated matrix is correct.
mask - Output array of N elements, every element of which is set to 0 for outliers and to 1 for the other points. The array is computed only in the RANSAC and LMedS methods.
public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp, int method, double prob, double threshold)
points1
- Array of N (N >= 5) 2D points from the first image. The point coordinates should
be floating-point (single or double precision).points2
- Array of the second image points of the same size and format as points1 .focal
- focal length of the camera. Note that this function assumes that points1 and points2
are feature points from cameras with same focal length and principal point.pp
- principal point of the camera.method
- Method for computing a fundamental matrix.
threshold
- Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
line in pixels, beyond which the point is considered an outlier and is not used for computing the
final essential matrix. It can be set to something like 1-3, depending on the accuracy of the
point localization, image resolution, and the image noise.
prob
- Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
confidence (probability) that the estimated matrix is correct.
public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp, int method, double prob)
points1
- Array of N (N >= 5) 2D points from the first image. The point coordinates should
be floating-point (single or double precision).
points2
- Array of the second image points of the same size and format as points1.
focal
- Focal length of the camera. Note that this function assumes that points1 and points2
are feature points from cameras with the same focal length and principal point.
pp
- Principal point of the camera.
method
- Method for computing an essential matrix.
prob
- Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
confidence (probability) that the estimated matrix is correct.
public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp, int method)
points1
- Array of N (N >= 5) 2D points from the first image. The point coordinates should
be floating-point (single or double precision).
points2
- Array of the second image points of the same size and format as points1.
focal
- Focal length of the camera. Note that this function assumes that points1 and points2
are feature points from cameras with the same focal length and principal point.
pp
- Principal point of the camera.
method
- Method for computing an essential matrix.
public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp)
points1
- Array of N (N >= 5) 2D points from the first image. The point coordinates should
be floating-point (single or double precision).
points2
- Array of the second image points of the same size and format as points1.
focal
- Focal length of the camera. Note that this function assumes that points1 and points2
are feature points from cameras with the same focal length and principal point.
pp
- Principal point of the camera.
public static Mat findEssentialMat(Mat points1, Mat points2, double focal)
points1
- Array of N (N >= 5) 2D points from the first image. The point coordinates should
be floating-point (single or double precision).
points2
- Array of the second image points of the same size and format as points1.
focal
- Focal length of the camera. Note that this function assumes that points1 and points2
are feature points from cameras with the same focal length and principal point.
public static Mat findEssentialMat(Mat points1, Mat points2)
points1
- Array of N (N >= 5) 2D points from the first image. The point coordinates should
be floating-point (single or double precision).
points2
- Array of the second image points of the same size and format as points1.
public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double ransacReprojThreshold, double confidence, Mat mask)
points1
- Array of N points from the first image. The point coordinates should be
floating-point (single or double precision).
points2
- Array of the second image points of the same size and format as points1.
method
- Method for computing a fundamental matrix.
ransacReprojThreshold
- Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
line in pixels, beyond which the point is considered an outlier and is not used for computing the
final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
point localization, image resolution, and the image noise.
confidence
- Parameter used for the RANSAC and LMedS methods only. It specifies a desirable level
of confidence (probability) that the estimated matrix is correct.
mask
- Output array of N elements, every element of which is set to 0 for outliers and to 1
for the other points. The array is computed only in the RANSAC and LMedS methods.
// Example. Estimation of fundamental matrix using the RANSAC algorithm
int point_count = 100;
vector<Point2f> points1(point_count);
vector<Point2f> points2(point_count);
// initialize the points here ...
for( int i = 0; i < point_count; i++ )
{
points1[i] = ...;
points2[i] = ...;
}
Mat fundamental_matrix =
findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
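An equivalent Java sketch (the correspondences are placeholders that would come from a feature matcher in practice):
import org.opencv.calib3d.Calib3d;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;

int pointCount = 100;
Point[] p1 = new Point[pointCount];
Point[] p2 = new Point[pointCount];
for (int i = 0; i < pointCount; i++) {
    // initialize the points here (placeholder values) ...
    p1[i] = new Point((i % 10) * 60, (i / 10) * 45);
    p2[i] = new Point((i % 10) * 60 + 4, (i / 10) * 45 + 2);
}
Mat fundamentalMatrix = Calib3d.findFundamentalMat(
        new MatOfPoint2f(p1), new MatOfPoint2f(p2), Calib3d.FM_RANSAC, 3, 0.99);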
public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double ransacReprojThreshold, double confidence)
points1
- Array of N points from the first image. The point coordinates should be
floating-point (single or double precision).
points2
- Array of the second image points of the same size and format as points1.
method
- Method for computing a fundamental matrix.
ransacReprojThreshold
- Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
line in pixels, beyond which the point is considered an outlier and is not used for computing the
final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
point localization, image resolution, and the image noise.
confidence
- Parameter used for the RANSAC and LMedS methods only. It specifies a desirable level
of confidence (probability) that the estimated matrix is correct.
// Example. Estimation of fundamental matrix using the RANSAC algorithm
int point_count = 100;
vector<Point2f> points1(point_count);
vector<Point2f> points2(point_count);
// initialize the points here ...
for( int i = 0; i < point_count; i++ )
{
points1[i] = ...;
points2[i] = ...;
}
Mat fundamental_matrix =
findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double ransacReprojThreshold)
points1
- Array of N points from the first image. The point coordinates should be
floating-point (single or double precision).
points2
- Array of the second image points of the same size and format as points1.
method
- Method for computing a fundamental matrix.
ransacReprojThreshold
- Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
line in pixels, beyond which the point is considered an outlier and is not used for computing the
final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
point localization, image resolution, and the image noise.
// Example. Estimation of fundamental matrix using the RANSAC algorithm
int point_count = 100;
vector<Point2f> points1(point_count);
vector<Point2f> points2(point_count);
// initialize the points here ...
for( int i = 0; i < point_count; i++ )
{
points1[i] = ...;
points2[i] = ...;
}
Mat fundamental_matrix =
findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method)
points1
- Array of N points from the first image. The point coordinates should be
floating-point (single or double precision).
points2
- Array of the second image points of the same size and format as points1.
method
- Method for computing a fundamental matrix.
// Example. Estimation of fundamental matrix using the RANSAC algorithm
int point_count = 100;
vector<Point2f> points1(point_count);
vector<Point2f> points2(point_count);
// initialize the points here ...
for( int i = 0; i < point_count; i++ )
{
points1[i] = ...;
points2[i] = ...;
}
Mat fundamental_matrix =
findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2)
points1
- Array of N points from the first image. The point coordinates should be
floating-point (single or double precision).
points2
- Array of the second image points of the same size and format as points1.
// Example. Estimation of fundamental matrix using the RANSAC algorithm
int point_count = 100;
vector<Point2f> points1(point_count);
vector<Point2f> points2(point_count);
// initialize the points here ...
for( int i = 0; i < point_count; i++ )
{
points1[i] = ...;
points2[i] = ...;
}
Mat fundamental_matrix =
findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method, double ransacReprojThreshold, Mat mask, int maxIters, double confidence)
srcPoints
- Coordinates of the points in the original plane, a matrix of the type CV_32FC2
or vector<Point2f> .
dstPoints
- Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
a vector<Point2f> .
method
- Method used to compute a homography matrix. The following methods are possible:
ransacReprojThreshold
- Maximum allowed reprojection error to treat a point pair as an inlier
(used in the RANSAC and RHO methods only). That is, if
\(\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}\)
then the point \(i\) is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
it usually makes sense to set this parameter somewhere in the range of 1 to 10.
mask
- Optional output mask set by a robust method ( RANSAC or LMEDS ). Note that the input
mask values are ignored.
maxIters
- The maximum number of RANSAC iterations.
confidence
- Confidence level, between 0 and 1.
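A minimal Java sketch of this overload (the correspondences below are hypothetical placeholders):
import org.opencv.calib3d.Calib3d;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;

// Four or more correspondences between the two planes (hypothetical values).
MatOfPoint2f srcPoints = new MatOfPoint2f(
        new Point(0, 0), new Point(100, 0), new Point(100, 100), new Point(0, 100));
MatOfPoint2f dstPoints = new MatOfPoint2f(
        new Point(10, 5), new Point(115, 8), new Point(112, 118), new Point(8, 110));
Mat inlierMask = new Mat();   // filled by the robust method
Mat H = Calib3d.findHomography(srcPoints, dstPoints, Calib3d.RANSAC,
        3.0, inlierMask, 2000, 0.995);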
public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method, double ransacReprojThreshold, Mat mask, int maxIters)
srcPoints
- Coordinates of the points in the original plane, a matrix of the type CV_32FC2
or vector<Point2f> .
dstPoints
- Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
a vector<Point2f> .
method
- Method used to compute a homography matrix. The following methods are possible:
ransacReprojThreshold
- Maximum allowed reprojection error to treat a point pair as an inlier
(used in the RANSAC and RHO methods only). That is, if
\(\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}\)
then the point \(i\) is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
it usually makes sense to set this parameter somewhere in the range of 1 to 10.
mask
- Optional output mask set by a robust method ( RANSAC or LMEDS ). Note that the input
mask values are ignored.
maxIters
- The maximum number of RANSAC iterations.
public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method, double ransacReprojThreshold, Mat mask)
srcPoints
- Coordinates of the points in the original plane, a matrix of the type CV_32FC2
or vector<Point2f> .
dstPoints
- Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
a vector<Point2f> .
method
- Method used to compute a homography matrix. The following methods are possible:
ransacReprojThreshold
- Maximum allowed reprojection error to treat a point pair as an inlier
(used in the RANSAC and RHO methods only). That is, if
\(\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}\)
then the point \(i\) is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
it usually makes sense to set this parameter somewhere in the range of 1 to 10.
mask
- Optional output mask set by a robust method ( RANSAC or LMEDS ). Note that the input
mask values are ignored.
public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method, double ransacReprojThreshold)
srcPoints
- Coordinates of the points in the original plane, a matrix of the type CV_32FC2
or vector<Point2f> .
dstPoints
- Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
a vector<Point2f> .
method
- Method used to compute a homography matrix. The following methods are possible:
ransacReprojThreshold
- Maximum allowed reprojection error to treat a point pair as an inlier
(used in the RANSAC and RHO methods only). That is, if
\(\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}\)
then the point \(i\) is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
it usually makes sense to set this parameter somewhere in the range of 1 to 10.
public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method)
srcPoints
- Coordinates of the points in the original plane, a matrix of the type CV_32FC2
or vector<Point2f> .
dstPoints
- Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
a vector<Point2f> .
method
- Method used to compute a homography matrix. The following methods are possible:
public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints)
srcPoints
- Coordinates of the points in the original plane, a matrix of the type CV_32FC2
or vector<Point2f> .
dstPoints
- Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
a vector<Point2f> .
public static Mat getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize, boolean centerPrincipalPoint)
cameraMatrix
- Input camera matrix.
imgsize
- Camera view image size in pixels.
centerPrincipalPoint
- Location of the principal point in the new camera matrix. The
parameter indicates whether this location should be at the image center or not.
public static Mat getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize)
cameraMatrix
- Input camera matrix.
imgsize
- Camera view image size in pixels.
public static Mat getDefaultNewCameraMatrix(Mat cameraMatrix)
cameraMatrix
- Input camera matrix.
public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize, Rect validPixROI, boolean centerPrincipalPoint)
cameraMatrix
- Input camera matrix.
distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
imageSize
- Original image size.
alpha
- Free scaling parameter between 0 (when all the pixels in the undistorted image are
valid) and 1 (when all the source image pixels are retained in the undistorted image). See
stereoRectify for details.
newImgSize
- Image size after rectification. By default, it is set to imageSize .
validPixROI
- Optional output rectangle that outlines all-good-pixels region in the
undistorted image. See roi1, roi2 description in stereoRectify .
centerPrincipalPoint
- Optional flag that indicates whether in the new camera matrix the
principal point should be at the image center or not. By default, the principal point is chosen to
best fit a subset of the source image (determined by alpha) to the corrected image.
public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize, Rect validPixROI)
cameraMatrix
- Input camera matrix.
distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
imageSize
- Original image size.
alpha
- Free scaling parameter between 0 (when all the pixels in the undistorted image are
valid) and 1 (when all the source image pixels are retained in the undistorted image). See
stereoRectify for details.
newImgSize
- Image size after rectification. By default, it is set to imageSize .
validPixROI
- Optional output rectangle that outlines all-good-pixels region in the
undistorted image. See roi1, roi2 description in stereoRectify .
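A minimal Java sketch of this overload (the intrinsics and distortion values are hypothetical placeholders from an assumed earlier calibration):
import org.opencv.calib3d.Calib3d;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfDouble;
import org.opencv.core.Rect;
import org.opencv.core.Size;

// Hypothetical intrinsics and distortion from a previous calibration.
Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);
cameraMatrix.put(0, 0, 800.0);  // f_x
cameraMatrix.put(1, 1, 800.0);  // f_y
cameraMatrix.put(0, 2, 320.0);  // c_x
cameraMatrix.put(1, 2, 240.0);  // c_y
MatOfDouble distCoeffs = new MatOfDouble(-0.3, 0.1, 0.0, 0.0); // k1, k2, p1, p2
Size imageSize = new Size(640, 480);
Rect validPixROI = new Rect(); // receives the all-good-pixels rectangle
// alpha = 1 retains all source pixels in the undistorted image.
Mat newCameraMatrix = Calib3d.getOptimalNewCameraMatrix(
        cameraMatrix, distCoeffs, imageSize, 1.0, imageSize, validPixROI);
// newCameraMatrix can then be passed to undistort/initUndistortRectifyMap.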
public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize)
cameraMatrix
- Input camera matrix.
distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
imageSize
- Original image size.
alpha
- Free scaling parameter between 0 (when all the pixels in the undistorted image are
valid) and 1 (when all the source image pixels are retained in the undistorted image). See
stereoRectify for details.
newImgSize
- Image size after rectification. By default, it is set to imageSize .
public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha)
cameraMatrix
- Input camera matrix.
distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
imageSize
- Original image size.
alpha
- Free scaling parameter between 0 (when all the pixels in the undistorted image are
valid) and 1 (when all the source image pixels are retained in the undistorted image). See
stereoRectify for details.
public static Mat initCameraMatrix2D(List<MatOfPoint3f> objectPoints, List<MatOfPoint2f> imagePoints, Size imageSize, double aspectRatio)
objectPoints
- Vector of vectors of the calibration pattern points in the calibration pattern
coordinate space. In the old interface all the per-view vectors are concatenated. See
calibrateCamera for details.
imagePoints
- Vector of vectors of the projections of the calibration pattern points. In the
old interface all the per-view vectors are concatenated.
imageSize
- Image size in pixels used to initialize the principal point.
aspectRatio
- If it is zero or negative, both \(f_x\) and \(f_y\) are estimated independently.
Otherwise, \(f_x = f_y * \texttt{aspectRatio}\) .
The function estimates and returns an initial camera matrix for the camera calibration process.
Currently, the function only supports planar calibration patterns, which are patterns where each
object point has z-coordinate = 0.
public static Mat initCameraMatrix2D(List<MatOfPoint3f> objectPoints, List<MatOfPoint2f> imagePoints, Size imageSize)
objectPoints
- Vector of vectors of the calibration pattern points in the calibration pattern
coordinate space. In the old interface all the per-view vectors are concatenated. See
calibrateCamera for details.
imagePoints
- Vector of vectors of the projections of the calibration pattern points. In the
old interface all the per-view vectors are concatenated.
imageSize
- Image size in pixels used to initialize the principal point.
The function estimates and returns an initial camera matrix for the camera calibration process.
Currently, the function only supports planar calibration patterns, which are patterns where each
object point has z-coordinate = 0.
public static Rect getValidDisparityROI(Rect roi1, Rect roi2, int minDisparity, int numberOfDisparities, int SADWindowSize)
public static double[] RQDecomp3x3(Mat src, Mat mtxR, Mat mtxQ, Mat Qx, Mat Qy, Mat Qz)
src
- 3x3 input matrix.
mtxR
- Output 3x3 upper-triangular matrix.
mtxQ
- Output 3x3 orthogonal matrix.
Qx
- Optional output 3x3 rotation matrix around x-axis.
Qy
- Optional output 3x3 rotation matrix around y-axis.
Qz
- Optional output 3x3 rotation matrix around z-axis.
The function computes a RQ decomposition using the given rotations. This function is used in
decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
and a rotation matrix.
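For illustration, a minimal Java sketch of the shortest overload, using a trivial identity input (purely hypothetical):
import org.opencv.calib3d.Calib3d;
import org.opencv.core.CvType;
import org.opencv.core.Mat;

Mat src = Mat.eye(3, 3, CvType.CV_64F);  // a hypothetical 3x3 input (identity here)
Mat mtxR = new Mat();
Mat mtxQ = new Mat();
double[] eulerDegrees = Calib3d.RQDecomp3x3(src, mtxR, mtxQ);
// eulerDegrees holds the three Euler angles in degrees; mtxR * mtxQ reproduces src.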
It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
sequence of rotations about the three principal axes that results in the same orientation of an
object, e.g. see CITE: Slabaugh . The returned three rotation matrices and corresponding three Euler angles
are only one of the possible solutions.
public static double[] RQDecomp3x3(Mat src, Mat mtxR, Mat mtxQ, Mat Qx, Mat Qy)
src
- 3x3 input matrix.
mtxR
- Output 3x3 upper-triangular matrix.
mtxQ
- Output 3x3 orthogonal matrix.
Qx
- Optional output 3x3 rotation matrix around x-axis.
Qy
- Optional output 3x3 rotation matrix around y-axis.
The function computes a RQ decomposition using the given rotations. This function is used in
decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
and a rotation matrix.
It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
sequence of rotations about the three principal axes that results in the same orientation of an
object, e.g. see CITE: Slabaugh . The returned three rotation matrices and corresponding three Euler angles
are only one of the possible solutions.
public static double[] RQDecomp3x3(Mat src, Mat mtxR, Mat mtxQ, Mat Qx)
src
- 3x3 input matrix.
mtxR
- Output 3x3 upper-triangular matrix.
mtxQ
- Output 3x3 orthogonal matrix.
Qx
- Optional output 3x3 rotation matrix around x-axis.
The function computes a RQ decomposition using the given rotations. This function is used in
decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
and a rotation matrix.
It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
sequence of rotations about the three principal axes that results in the same orientation of an
object, e.g. see CITE: Slabaugh . The returned three rotation matrices and corresponding three Euler angles
are only one of the possible solutions.
public static double[] RQDecomp3x3(Mat src, Mat mtxR, Mat mtxQ)
src
- 3x3 input matrix.
mtxR
- Output 3x3 upper-triangular matrix.
mtxQ
- Output 3x3 orthogonal matrix.
The function computes a RQ decomposition using the given rotations. This function is used in
decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
and a rotation matrix.
It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
sequence of rotations about the three principal axes that results in the same orientation of an
object, e.g. see CITE: Slabaugh . The returned three rotation matrices and corresponding three Euler angles
are only one of the possible solutions.
public static boolean find4QuadCornerSubpix(Mat img, Mat corners, Size region_size)
public static boolean findChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners, int flags)
image
- Source chessboard view. It must be an 8-bit grayscale or color image.
patternSize
- Number of inner corners per a chessboard row and column
( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
corners
- Output array of detected corners.
flags
- Various operation flags that can be zero or a combination of the following values:
Size patternsize(8,6); //interior number of corners
Mat gray = ....; //source image
vector<Point2f> corners; //this will be filled by the detected corners
//CALIB_CB_FAST_CHECK saves a lot of time on images
//that do not contain any chessboard corners
bool patternfound = findChessboardCorners(gray, patternsize, corners,
CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
+ CALIB_CB_FAST_CHECK);
if(patternfound)
cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
drawChessboardCorners(img, patternsize, Mat(corners), patternfound);
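A Java rendering of the snippet above might look like the following sketch (the file name is a hypothetical placeholder):
import org.opencv.calib3d.Calib3d;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Size;
import org.opencv.core.TermCriteria;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

Size patternSize = new Size(8, 6);  // interior number of corners
Mat gray = Imgcodecs.imread("board.png", Imgcodecs.IMREAD_GRAYSCALE); // hypothetical path
MatOfPoint2f corners = new MatOfPoint2f(); // filled by the detected corners
// CALIB_CB_FAST_CHECK saves a lot of time on images without chessboard corners.
boolean patternFound = Calib3d.findChessboardCorners(gray, patternSize, corners,
        Calib3d.CALIB_CB_ADAPTIVE_THRESH + Calib3d.CALIB_CB_NORMALIZE_IMAGE
        + Calib3d.CALIB_CB_FAST_CHECK);
if (patternFound) {
    Imgproc.cornerSubPix(gray, corners, new Size(11, 11), new Size(-1, -1),
            new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 30, 0.1));
}
// Drawing on the grayscale image here for simplicity; a color image shows the markers better.
Calib3d.drawChessboardCorners(gray, patternSize, corners, patternFound);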
Note: The function requires white space (like a square-thick border, the wider the better) around
the board to make the detection more robust in various environments. Otherwise, if there is no
border and the background is dark, the outer black squares cannot be segmented properly and so the
square grouping and ordering algorithm fails.
public static boolean findChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners)
image
- Source chessboard view. It must be an 8-bit grayscale or color image.
patternSize
- Number of inner corners per a chessboard row and column
( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
corners
- Output array of detected corners.
Size patternsize(8,6); //interior number of corners
Mat gray = ....; //source image
vector<Point2f> corners; //this will be filled by the detected corners
//CALIB_CB_FAST_CHECK saves a lot of time on images
//that do not contain any chessboard corners
bool patternfound = findChessboardCorners(gray, patternsize, corners,
CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
+ CALIB_CB_FAST_CHECK);
if(patternfound)
cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
drawChessboardCorners(img, patternsize, Mat(corners), patternfound);
Note: The function requires white space (like a square-thick border, the wider the better) around
the board to make the detection more robust in various environments. Otherwise, if there is no
border and the background is dark, the outer black squares cannot be segmented properly and so the
square grouping and ordering algorithm fails.
public static boolean findChessboardCornersSB(Mat image, Size patternSize, Mat corners, int flags)
image
- Source chessboard view. It must be an 8-bit grayscale or color image.
patternSize
- Number of inner corners per a chessboard row and column
( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
corners
- Output array of detected corners.
flags
- Various operation flags that can be zero or a combination of the following values:
public static boolean findChessboardCornersSB(Mat image, Size patternSize, Mat corners)
image
- Source chessboard view. It must be an 8-bit grayscale or color image.
patternSize
- Number of inner corners per a chessboard row and column
( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
corners
- Output array of detected corners.
public static boolean findCirclesGrid(Mat image, Size patternSize, Mat centers, int flags)
public static boolean solvePnP(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int flags)
objectPoints
- Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector<Point3f> can also be passed here.
imagePoints
- Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.
cameraMatrix
- Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .
distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
rvec
- Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system.
tvec
- Output translation vector.
useExtrinsicGuess
- Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.
flags
- Method for solving a PnP problem:
The function computes the rotation (rvec) and the translation (tvec) vectors that allow transforming
a 3D point expressed in the world frame into the camera frame:
\(
\begin{align*}
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\hspace{0.2em} ^{c}\bf{M}_w
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix} \\
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\begin{bmatrix}
r_{11} & r_{12} & r_{13} & t_x \\
r_{21} & r_{22} & r_{23} & t_y \\
r_{31} & r_{32} & r_{33} & t_z \\
0 & 0 & 0 & 1
\end{bmatrix}
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix}
\end{align*}
\)
Note:
With SOLVEPNP_ITERATIVE and useExtrinsicGuess=true, the minimum number of points is 3 (3 points
are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
global solution to converge.
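A minimal Java sketch of the six-argument overload (the object/image correspondences and intrinsics are hypothetical placeholders):
import org.opencv.calib3d.Calib3d;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfDouble;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.MatOfPoint3f;
import org.opencv.core.Point;
import org.opencv.core.Point3;

// Four coplanar object points (a hypothetical 10x10 square on the Z=0 plane)
// and their observed image projections (also hypothetical).
MatOfPoint3f objectPoints = new MatOfPoint3f(
        new Point3(0, 0, 0), new Point3(10, 0, 0),
        new Point3(10, 10, 0), new Point3(0, 10, 0));
MatOfPoint2f imagePoints = new MatOfPoint2f(
        new Point(300, 220), new Point(380, 225),
        new Point(375, 305), new Point(295, 300));
Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);
cameraMatrix.put(0, 0, 800.0);  // f_x
cameraMatrix.put(1, 1, 800.0);  // f_y
cameraMatrix.put(0, 2, 320.0);  // c_x
cameraMatrix.put(1, 2, 240.0);  // c_y
MatOfDouble distCoeffs = new MatOfDouble(); // empty: zero distortion assumed
Mat rvec = new Mat();
Mat tvec = new Mat();
boolean ok = Calib3d.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);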
public static boolean solvePnP(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess)
objectPoints
- Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector<Point3f> can also be passed here.
imagePoints
- Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.
cameraMatrix
- Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .
distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
rvec
- Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system.
tvec
- Output translation vector.
useExtrinsicGuess
- Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.
The function computes the rotation (rvec) and the translation (tvec) vectors that allow transforming
a 3D point expressed in the world frame into the camera frame:
\(
\begin{align*}
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\hspace{0.2em} ^{c}\bf{M}_w
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix} \\
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\begin{bmatrix}
r_{11} & r_{12} & r_{13} & t_x \\
r_{21} & r_{22} & r_{23} & t_y \\
r_{31} & r_{32} & r_{33} & t_z \\
0 & 0 & 0 & 1
\end{bmatrix}
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix}
\end{align*}
\)
Note:
With SOLVEPNP_ITERATIVE and useExtrinsicGuess=true, the minimum number of points is 3 (3 points
are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
global solution to converge.
public static boolean solvePnP(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec)
objectPoints
- Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector<Point3f> can also be passed here.
imagePoints
- Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.
cameraMatrix
- Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .
distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
rvec
- Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system.
tvec
- Output translation vector.
The function computes the rotation (rvec) and the translation (tvec) vectors that allow transforming
a 3D point expressed in the world frame into the camera frame:
\(
\begin{align*}
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\hspace{0.2em} ^{c}\bf{M}_w
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix} \\
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\begin{bmatrix}
r_{11} & r_{12} & r_{13} & t_x \\
r_{21} & r_{22} & r_{23} & t_y \\
r_{31} & r_{32} & r_{33} & t_z \\
0 & 0 & 0 & 1
\end{bmatrix}
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix}
\end{align*}
\)
Note:
With SOLVEPNP_ITERATIVE and useExtrinsicGuess=true, the minimum number of points is 3 (3 points
are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
global solution to converge.
public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence, Mat inliers, int flags)
objectPoints
- Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector<Point3f> can also be passed here.
imagePoints
- Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.
cameraMatrix
- Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .
distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
rvec
- Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system.
tvec
- Output translation vector.
useExtrinsicGuess
- Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.
iterationsCount
- Number of iterations.
reprojectionError
- Inlier threshold value used by the RANSAC procedure. The parameter value
is the maximum allowed distance between the observed and computed point projections to consider it
an inlier.
confidence
- The probability that the algorithm produces a useful result.
inliers
- Output vector that contains indices of inliers in objectPoints and imagePoints .
flags
- Method for solving a PnP problem (see REF: solvePnP ).
The function estimates an object pose given a set of object points, their corresponding image
projections, as well as the camera matrix and the distortion coefficients. This function finds such
a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
makes the function resistant to outliers.
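A minimal sketch of the variant with an inliers output, reusing the hypothetical objectPoints, imagePoints, cameraMatrix and distCoeffs from the solvePnP sketch earlier (in practice with many more correspondences, some possibly wrong):
Mat rvec = new Mat();
Mat tvec = new Mat();
Mat inliers = new Mat(); // receives indices of the inlier correspondences
boolean ok = Calib3d.solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs,
        rvec, tvec,
        false,   // useExtrinsicGuess
        100,     // iterationsCount
        8.0f,    // reprojectionError threshold in pixels (hypothetical)
        0.99,    // confidence
        inliers);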
public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence, Mat inliers)
objectPoints
- Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector<Point3f> can also be passed here.
imagePoints
- Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.
cameraMatrix
- Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .
distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
rvec
- Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system.
tvec
- Output translation vector.
useExtrinsicGuess
- Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.
iterationsCount
- Number of iterations.
reprojectionError
- Inlier threshold value used by the RANSAC procedure. The parameter value
is the maximum allowed distance between the observed and computed point projections to consider it
an inlier.
confidence
- The probability that the algorithm produces a useful result.
inliers
- Output vector that contains indices of inliers in objectPoints and imagePoints .
The function estimates an object pose given a set of object points, their corresponding image
projections, as well as the camera matrix and the distortion coefficients. This function finds such
a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
makes the function resistant to outliers.
public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence)
objectPoints
- Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector<Point3f> can also be passed here.
imagePoints
- Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.
cameraMatrix
- Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .
distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
rvec
- Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system.
tvec
- Output translation vector.
useExtrinsicGuess
- Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.
iterationsCount
- Number of iterations.
reprojectionError
- Inlier threshold value used by the RANSAC procedure. The parameter value
is the maximum allowed distance between the observed and computed point projections to consider it
an inlier.
confidence
- The probability that the algorithm produces a useful result.
The function estimates an object pose given a set of object points, their corresponding image
projections, as well as the camera matrix and the distortion coefficients. This function finds such
a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
makes the function resistant to outliers.
public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError)
objectPoints
- Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector<Point3f> can also be passed here.
imagePoints
- Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.
cameraMatrix
- Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .
distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
rvec
- Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system.
tvec
- Output translation vector.
useExtrinsicGuess
- Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.
iterationsCount
- Number of iterations.
reprojectionError
- Inlier threshold value used by the RANSAC procedure. The parameter value
is the maximum allowed distance between the observed and computed point projections to consider it
an inlier.
The function estimates an object pose given a set of object points, their corresponding image
projections, as well as the camera matrix and the distortion coefficients. This function finds such
a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
makes the function resistant to outliers.
public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount)
objectPoints
- Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector<Point3f> can also be passed here.
imagePoints
- Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.
cameraMatrix
- Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .
distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
rvec
- Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system.
tvec
- Output translation vector.
useExtrinsicGuess
- Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.
iterationsCount
- Number of iterations.
The function estimates an object pose given a set of object points, their corresponding image
projections, as well as the camera matrix and the distortion coefficients. This function finds such
a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
makes the function resistant to outliers.
public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess)
objectPoints
- Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector<Point3f> can also be passed here.
imagePoints
- Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.
cameraMatrix
- Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .
distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
rvec
- Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system.
tvec
- Output translation vector.
useExtrinsicGuess
- Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.
The function estimates an object pose given a set of object points, their corresponding image
projections, as well as the camera matrix and the distortion coefficients. This function finds such
a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
makes the function resistant to outliers.
public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec)
objectPoints
- Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector<Point3f> can also be passed here.
imagePoints
- Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.
cameraMatrix
- Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .
distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
rvec
- Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system.
tvec
- Output translation vector.
The function estimates an object pose given a set of object points, their corresponding image
projections, as well as the camera matrix and the distortion coefficients. This function finds such
a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
makes the function resistant to outliers.
public static boolean stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat H1, Mat H2, double threshold)
points1
- Array of feature points in the first image.
points2
- The corresponding points in the second image. The same formats as in
findFundamentalMat are supported.
F
- Input fundamental matrix. It can be computed from the same set of point pairs using
findFundamentalMat .
imgSize
- Size of the image.
H1
- Output rectification homography matrix for the first image.
H2
- Output rectification homography matrix for the second image.
threshold
- Optional threshold used to filter out the outliers. If the parameter is greater
than zero, all the point pairs that do not comply with the epipolar geometry (that is, the points
for which \(|\texttt{points2[i]}^T*\texttt{F}*\texttt{points1[i]}|>\texttt{threshold}\) ) are
rejected prior to computing the homographies. Otherwise, all the points are considered inliers.
The function computes the rectification transformations without knowing intrinsic parameters of the
cameras and their relative position in the space, which explains the suffix "uncalibrated". Another
related difference from stereoRectify is that the function outputs not the rectification
transformations in the object (3D) space, but the planar perspective transformations encoded by the
homography matrices H1 and H2 . The function implements the algorithm CITE: Hartley99 .
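A hedged Java sketch of this overload (pts1 and pts2 are assumed to be matched MatOfPoint2f sets, as in the findFundamentalMat example earlier; the 640x480 size is a placeholder):
Mat F = Calib3d.findFundamentalMat(pts1, pts2, Calib3d.FM_RANSAC, 3, 0.99);
Mat H1 = new Mat();
Mat H2 = new Mat();
boolean ok = Calib3d.stereoRectifyUncalibrated(pts1, pts2, F, new Size(640, 480), H1, H2);
// H1 and H2 can then be applied to the two images with Imgproc.warpPerspective.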
Note:
While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily
depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion,
it would be better to correct it before computing the fundamental matrix and calling this
function. For example, distortion coefficients can be estimated for each head of stereo camera
separately by using calibrateCamera . Then, the images can be corrected using undistort , or
just the point coordinates can be corrected with undistortPoints .
public static boolean stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat H1, Mat H2)
points1
- Array of feature points in the first image.
points2
- The corresponding points in the second image. The same formats as in
findFundamentalMat are supported.
F
- Input fundamental matrix. It can be computed from the same set of point pairs using
findFundamentalMat .
imgSize
- Size of the image.
H1
- Output rectification homography matrix for the first image.
H2
- Output rectification homography matrix for the second image.
The function computes the rectification transformations without knowing intrinsic parameters of the
cameras and their relative position in the space, which explains the suffix "uncalibrated". Another
related difference from stereoRectify is that the function outputs not the rectification
transformations in the object (3D) space, but the planar perspective transformations encoded by the
homography matrices H1 and H2 . The function implements the algorithm CITE: Hartley99 .
Note:
While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily
depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion,
it would be better to correct it before computing the fundamental matrix and calling this
function. For example, distortion coefficients can be estimated for each head of stereo camera
separately by using calibrateCamera . Then, the images can be corrected using undistort , or
just the point coordinates can be corrected with undistortPoints .
public static double calibrateCameraExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat perViewErrors, int flags, TermCriteria criteria)
objectPoints
- In the new interface it is a vector of vectors of calibration pattern points in
the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer
vector contains as many elements as the number of pattern views. If the same calibration pattern
is shown in each view and it is fully visible, all the vectors will be the same. It is, however,
possible to use partially occluded patterns, or even different patterns in different views; then
the vectors will be different. The points are 3D, but since they are in a pattern coordinate system,
if the rig is planar, it may make sense to put the model on the XY coordinate plane so that the
Z-coordinate of each input object point is 0.
In the old interface all the vectors of object points from different views are concatenated
together.
imagePoints
- In the new interface it is a vector of vectors of the projections of calibration
pattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() must be equal to
objectPoints.size(), and imagePoints[i].size() must be equal to objectPoints[i].size() for each i.
In the old interface all the vectors of image points from different views are concatenated
together.
imageSize
- Size of the image used only to initialize the intrinsic camera matrix.
cameraMatrix
- Output 3x3 floating-point camera matrix
\(A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) . If CALIB_USE_INTRINSIC_GUESS
and/or CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
initialized before calling the function.
distCoeffs
- Output vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements.
rvecs
- Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view
(e.g. std::vector<cv::Mat>). That is, each k-th rotation vector together with the corresponding
k-th translation vector (see the next output parameter description) brings the calibration pattern
from the model coordinate space (in which object points are specified) to the world coordinate
space, that is, a real position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
tvecs
- Output vector of translation vectors estimated for each pattern view.
stdDeviationsIntrinsics
- Output vector of standard deviations estimated for intrinsic parameters.
Order of deviations values:
\((f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
s_4, \tau_x, \tau_y)\) . If a parameter is not estimated, its deviation equals zero.
stdDeviationsExtrinsics
- Output vector of standard deviations estimated for extrinsic parameters.
Order of deviations values: \((R_1, T_1, \dotsc , R_M, T_M)\) where M is the number of pattern views
and \(R_i, T_i\) are concatenated 1x3 vectors.
perViewErrors
- Output vector of the RMS re-projection error estimated for each pattern view.
flags
- Different flags that may be zero or a combination of the following values:
criteria
- Termination criteria for the iterative optimization algorithm.
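A minimal Java sketch of a call to this overload (the list contents and image size are hypothetical placeholders; each view's pattern points and detected corners would be collected beforehand):
import java.util.ArrayList;
import java.util.List;
import org.opencv.calib3d.Calib3d;
import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.core.TermCriteria;

// One MatOfPoint3f of pattern points and one MatOfPoint2f of detected corners
// per view would be added to these lists (collection omitted here).
List<Mat> objectPoints = new ArrayList<>();
List<Mat> imagePoints = new ArrayList<>();
Size imageSize = new Size(640, 480);  // hypothetical sensor size
Mat cameraMatrix = new Mat();
Mat distCoeffs = new Mat();
List<Mat> rvecs = new ArrayList<>();
List<Mat> tvecs = new ArrayList<>();
Mat stdDevIntrinsics = new Mat();
Mat stdDevExtrinsics = new Mat();
Mat perViewErrors = new Mat();
double rms = Calib3d.calibrateCameraExtended(objectPoints, imagePoints, imageSize,
        cameraMatrix, distCoeffs, rvecs, tvecs,
        stdDevIntrinsics, stdDevExtrinsics, perViewErrors,
        0,  // no special calibration flags
        new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 30, 1e-6));
// rms is the overall RMS re-projection error; perViewErrors breaks it down per view.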
public static double calibrateCameraExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat perViewErrors, int flags)
objectPoints
- In the new interface it is a vector of vectors of calibration pattern points in
the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer
vector contains as many elements as the number of pattern views. If the same calibration pattern
is shown in each view and it is fully visible, all the vectors will be the same. It is, however,
possible to use partially occluded patterns, or even different patterns in different views; then
the vectors will be different. The points are 3D, but since they are in a pattern coordinate system,
if the rig is planar, it may make sense to put the model on the XY coordinate plane so that the
Z-coordinate of each input object point is 0.
In the old interface all the vectors of object points from different views are concatenated
together.
imagePoints
- In the new interface it is a vector of vectors of the projections of calibration
pattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() must be equal to
objectPoints.size(), and imagePoints[i].size() must be equal to objectPoints[i].size() for each i.
In the old interface all the vectors of image points from different views are concatenated
together.
imageSize
- Size of the image used only to initialize the intrinsic camera matrix.
cameraMatrix
- Output 3x3 floating-point camera matrix
\(A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) . If CALIB_USE_INTRINSIC_GUESS
and/or CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
initialized before calling the function.
distCoeffs
- Output vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements.
rvecs
- Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view
(e.g. std::vector<cv::Mat>). That is, each k-th rotation vector together with the corresponding
k-th translation vector (see the next output parameter description) brings the calibration pattern
from the model coordinate space (in which object points are specified) to the world coordinate
space, that is, a real position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
tvecs
- Output vector of translation vectors estimated for each pattern view.
stdDeviationsIntrinsics
- Output vector of standard deviations estimated for intrinsic parameters.
Order of deviations values:
\((f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
s_4, \tau_x, \tau_y)\) . If a parameter is not estimated, its deviation equals zero.
stdDeviationsExtrinsics
- Output vector of standard deviations estimated for extrinsic parameters.
Order of deviations values: \((R_1, T_1, \dotsc , R_M, T_M)\) where M is the number of pattern views
and \(R_i, T_i\) are concatenated 1x3 vectors.
perViewErrors
- Output vector of the RMS re-projection error estimated for each pattern view.
flags
- Different flags that may be zero or a combination of the following values:
public static double calibrateCameraExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat perViewErrors)
objectPoints
- In the new interface it is a vector of vectors of calibration pattern points in
the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer
vector contains as many elements as the number of pattern views. If the same calibration pattern
is shown in each view and it is fully visible, all the vectors will be the same. However, it is
also possible to use partially occluded patterns, or even different patterns in different views;
then the vectors will differ. The points are 3D, but since they are given in a pattern coordinate
system, if the rig is planar it may make sense to place the model on the XY coordinate plane, so
that the Z-coordinate of each input object point is 0.
In the old interface all the vectors of object points from different views are concatenated
together.
imagePoints
- In the new interface it is a vector of vectors of the projections of calibration
pattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() must be equal to
objectPoints.size(), and imagePoints[i].size() must be equal to objectPoints[i].size() for each i.
In the old interface all the vectors of image points from different views are concatenated
together.
imageSize
- Size of the image used only to initialize the intrinsic camera matrix.
cameraMatrix
- Output 3x3 floating-point camera matrix
\(A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) . If CALIB_USE_INTRINSIC_GUESS
and/or CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
initialized before calling the function.
distCoeffs
- Output vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements.
rvecs
- Output vector of rotation vectors (see Rodrigues) estimated for each pattern view
(e.g. std::vector<cv::Mat>). That is, each k-th rotation vector together with the corresponding
k-th translation vector (see the next output parameter description) brings the calibration pattern
from the model coordinate space (in which object points are specified) to the world coordinate
space, that is, the real position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
tvecs
- Output vector of translation vectors estimated for each pattern view.
stdDeviationsIntrinsics
- Output vector of standard deviations estimated for intrinsic parameters.
Order of deviations values:
\((f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6, s_1, s_2, s_3,
s_4, \tau_x, \tau_y)\). If one of the parameters is not estimated, its deviation equals zero.
stdDeviationsExtrinsics
- Output vector of standard deviations estimated for extrinsic parameters.
Order of deviations values: \((R_1, T_1, \dotsc, R_M, T_M)\) where M is the number of pattern views,
and \(R_i, T_i\) are concatenated 1x3 vectors.
perViewErrors
- Output vector of the RMS re-projection error estimated for each pattern view.
public static double calibrateCamera(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, int flags, TermCriteria criteria)
public static double calibrateCamera(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, int flags)
public static double calibrateCamera(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs)
public static double calibrateCameraROExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat stdDeviationsObjPoints, Mat perViewErrors, int flags, TermCriteria criteria)
objectPoints
- Vector of vectors of calibration pattern points in the calibration pattern
coordinate space. See calibrateCamera() for details. If the object-releasing method is to be used,
the identical calibration board must be used in each view, it must be fully visible, all
objectPoints[i] must be the same, and all points should be roughly close to a plane. The calibration
target has to be rigid, or at least static if the camera (rather than the calibration target) is
shifted for grabbing images.
imagePoints
- Vector of vectors of the projections of calibration pattern points. See
calibrateCamera() for details.
imageSize
- Size of the image used only to initialize the intrinsic camera matrix.
iFixedPoint
- The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
a switch for calibration method selection. If the object-releasing method is to be used, pass the
parameter in the range [1, objectPoints[0].size()-2]; otherwise a value out of this range selects
the standard calibration method. Usually the top-right corner point of the calibration
board grid is recommended to be fixed when the object-releasing method is utilized. According to
\cite strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front
and objectPoints[0].back.z are used. With the object-releasing method, accurate rvecs, tvecs and
newObjPoints are only possible if the coordinates of these three fixed points are accurate enough.
cameraMatrix
- Output 3x3 floating-point camera matrix. See calibrateCamera() for details.
distCoeffs
- Output vector of distortion coefficients. See calibrateCamera() for details.
rvecs
- Output vector of rotation vectors estimated for each pattern view. See calibrateCamera()
for details.
tvecs
- Output vector of translation vectors estimated for each pattern view.
newObjPoints
- The updated output vector of calibration pattern points. The coordinates might
be scaled based on the three fixed points. The returned coordinates are accurate only if the
above-mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This
parameter is ignored with the standard calibration method.
stdDeviationsIntrinsics
- Output vector of standard deviations estimated for intrinsic parameters.
See calibrateCamera() for details.
stdDeviationsExtrinsics
- Output vector of standard deviations estimated for extrinsic parameters.
See calibrateCamera() for details.
stdDeviationsObjPoints
- Output vector of standard deviations estimated for refined coordinates
of calibration pattern points. It has the same size and order as the objectPoints[0] vector. This
parameter is ignored with the standard calibration method.
perViewErrors
- Output vector of the RMS re-projection error estimated for each pattern view.
flags
- Different flags that may be zero or a combination of some predefined values. See
calibrateCamera() for details. If the object-releasing method is used, the calibration time may
be much longer. CALIB_USE_QR or CALIB_USE_LU can be used for faster calibration, at the cost of
potentially lower precision and stability in some rare cases.
criteria
- Termination criteria for the iterative optimization algorithm.
public static double calibrateCameraROExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat stdDeviationsObjPoints, Mat perViewErrors, int flags)
objectPoints
- Vector of vectors of calibration pattern points in the calibration pattern
coordinate space. See calibrateCamera() for details. If the object-releasing method is to be used,
the identical calibration board must be used in each view, it must be fully visible, all
objectPoints[i] must be the same, and all points should be roughly close to a plane. The calibration
target has to be rigid, or at least static if the camera (rather than the calibration target) is
shifted for grabbing images.
imagePoints
- Vector of vectors of the projections of calibration pattern points. See
calibrateCamera() for details.
imageSize
- Size of the image used only to initialize the intrinsic camera matrix.
iFixedPoint
- The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
a switch for calibration method selection. If the object-releasing method is to be used, pass the
parameter in the range [1, objectPoints[0].size()-2]; otherwise a value out of this range selects
the standard calibration method. Usually the top-right corner point of the calibration
board grid is recommended to be fixed when the object-releasing method is utilized. According to
\cite strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front
and objectPoints[0].back.z are used. With the object-releasing method, accurate rvecs, tvecs and
newObjPoints are only possible if the coordinates of these three fixed points are accurate enough.
cameraMatrix
- Output 3x3 floating-point camera matrix. See calibrateCamera() for details.
distCoeffs
- Output vector of distortion coefficients. See calibrateCamera() for details.
rvecs
- Output vector of rotation vectors estimated for each pattern view. See calibrateCamera()
for details.
tvecs
- Output vector of translation vectors estimated for each pattern view.
newObjPoints
- The updated output vector of calibration pattern points. The coordinates might
be scaled based on the three fixed points. The returned coordinates are accurate only if the
above-mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This
parameter is ignored with the standard calibration method.
stdDeviationsIntrinsics
- Output vector of standard deviations estimated for intrinsic parameters.
See calibrateCamera() for details.
stdDeviationsExtrinsics
- Output vector of standard deviations estimated for extrinsic parameters.
See calibrateCamera() for details.
stdDeviationsObjPoints
- Output vector of standard deviations estimated for refined coordinates
of calibration pattern points. It has the same size and order as the objectPoints[0] vector. This
parameter is ignored with the standard calibration method.
perViewErrors
- Output vector of the RMS re-projection error estimated for each pattern view.
flags
- Different flags that may be zero or a combination of some predefined values. See
calibrateCamera() for details. If the object-releasing method is used, the calibration time may
be much longer. CALIB_USE_QR or CALIB_USE_LU can be used for faster calibration, at the cost of
potentially lower precision and stability in some rare cases.
public static double calibrateCameraROExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat stdDeviationsObjPoints, Mat perViewErrors)
objectPoints
- Vector of vectors of calibration pattern points in the calibration pattern
coordinate space. See calibrateCamera() for details. If the object-releasing method is to be used,
the identical calibration board must be used in each view, it must be fully visible, all
objectPoints[i] must be the same, and all points should be roughly close to a plane. The calibration
target has to be rigid, or at least static if the camera (rather than the calibration target) is
shifted for grabbing images.
imagePoints
- Vector of vectors of the projections of calibration pattern points. See
calibrateCamera() for details.
imageSize
- Size of the image used only to initialize the intrinsic camera matrix.
iFixedPoint
- The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
a switch for calibration method selection. If the object-releasing method is to be used, pass the
parameter in the range [1, objectPoints[0].size()-2]; otherwise a value out of this range selects
the standard calibration method. Usually the top-right corner point of the calibration
board grid is recommended to be fixed when the object-releasing method is utilized. According to
\cite strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front
and objectPoints[0].back.z are used. With the object-releasing method, accurate rvecs, tvecs and
newObjPoints are only possible if the coordinates of these three fixed points are accurate enough.
cameraMatrix
- Output 3x3 floating-point camera matrix. See calibrateCamera() for details.
distCoeffs
- Output vector of distortion coefficients. See calibrateCamera() for details.
rvecs
- Output vector of rotation vectors estimated for each pattern view. See calibrateCamera()
for details.
tvecs
- Output vector of translation vectors estimated for each pattern view.
newObjPoints
- The updated output vector of calibration pattern points. The coordinates might
be scaled based on the three fixed points. The returned coordinates are accurate only if the
above-mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This
parameter is ignored with the standard calibration method.
stdDeviationsIntrinsics
- Output vector of standard deviations estimated for intrinsic parameters.
See calibrateCamera() for details.
stdDeviationsExtrinsics
- Output vector of standard deviations estimated for extrinsic parameters.
See calibrateCamera() for details.
stdDeviationsObjPoints
- Output vector of standard deviations estimated for refined coordinates
of calibration pattern points. It has the same size and order as the objectPoints[0] vector. This
parameter is ignored with the standard calibration method.
perViewErrors
- Output vector of the RMS re-projection error estimated for each pattern view.
public static double calibrateCameraRO(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints, int flags, TermCriteria criteria)
public static double calibrateCameraRO(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints, int flags)
public static double calibrateCameraRO(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints)
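A minimal sketch of the object-releasing variant, assuming objectPoints and imagePoints have been collected as for calibrateCamera and that the board has 9 columns (the fixed top-right corner index is an illustrative choice):
// assumes: import org.opencv.core.*; import org.opencv.calib3d.Calib3d; import java.util.*;
int boardCols = 9;
int iFixedPoint = boardCols - 1;          // a value in [1, N-2] selects the object-releasing method
Mat cameraMatrix = new Mat(), distCoeffs = new Mat(), newObjPoints = new Mat();
List<Mat> rvecs = new ArrayList<>(), tvecs = new ArrayList<>();
double rms = Calib3d.calibrateCameraRO(objectPoints, imagePoints, new Size(1280, 720),
        iFixedPoint, cameraMatrix, distCoeffs, rvecs, tvecs, newObjPoints);
// newObjPoints now holds the refined board coordinates.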
public static double sampsonDistance(Mat pt1, Mat pt2, Mat F)
pt1
- first homogeneous 2d point
pt2
- second homogeneous 2d point
F
- fundamental matrix
public static double stereoCalibrateExtended(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, Mat perViewErrors, int flags, TermCriteria criteria)
objectPoints
- Vector of vectors of the calibration pattern points.
imagePoints1
- Vector of vectors of the projections of the calibration pattern points,
observed by the first camera.
imagePoints2
- Vector of vectors of the projections of the calibration pattern points,
observed by the second camera.
cameraMatrix1
- Input/output first camera matrix:
\(\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\) , \(j = 0,\, 1\) . If
any of CALIB_USE_INTRINSIC_GUESS, CALIB_FIX_ASPECT_RATIO,
CALIB_FIX_INTRINSIC, or CALIB_FIX_FOCAL_LENGTH are specified, some or all of the
matrix components must be initialized. See the flags description for details.
distCoeffs1
- Input/output vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. The output vector length depends on the flags.
cameraMatrix2
- Input/output second camera matrix. The parameter is similar to cameraMatrix1.
distCoeffs2
- Input/output lens distortion coefficients for the second camera. The parameter
is similar to distCoeffs1.
imageSize
- Size of the image used only to initialize the intrinsic camera matrix.
R
- Output rotation matrix between the 1st and the 2nd camera coordinate systems.
T
- Output translation vector between the coordinate systems of the cameras.
E
- Output essential matrix.
F
- Output fundamental matrix.
perViewErrors
- Output vector of the RMS re-projection error estimated for each pattern view.
flags
- Different flags that may be zero or a combination of the following values:
criteria
- Termination criteria for the iterative optimization algorithm.
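A minimal sketch, assuming both cameras were first calibrated individually so the intrinsics can be held fixed (input lists and matrices as described above):
// assumes: import org.opencv.core.*; import org.opencv.calib3d.Calib3d;
Mat R = new Mat(), T = new Mat(), E = new Mat(), F = new Mat(), perViewErrors = new Mat();
double rms = Calib3d.stereoCalibrateExtended(objectPoints, imagePoints1, imagePoints2,
        cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, new Size(1280, 720),
        R, T, E, F, perViewErrors, Calib3d.CALIB_FIX_INTRINSIC,
        new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 100, 1e-6));
// R and T bring points from the first camera coordinate system into the second.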
public static double stereoCalibrateExtended(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, Mat perViewErrors, int flags)
objectPoints
- Vector of vectors of the calibration pattern points.
imagePoints1
- Vector of vectors of the projections of the calibration pattern points,
observed by the first camera.
imagePoints2
- Vector of vectors of the projections of the calibration pattern points,
observed by the second camera.
cameraMatrix1
- Input/output first camera matrix:
\(\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\) , \(j = 0,\, 1\) . If
any of CALIB_USE_INTRINSIC_GUESS, CALIB_FIX_ASPECT_RATIO,
CALIB_FIX_INTRINSIC, or CALIB_FIX_FOCAL_LENGTH are specified, some or all of the
matrix components must be initialized. See the flags description for details.
distCoeffs1
- Input/output vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. The output vector length depends on the flags.
cameraMatrix2
- Input/output second camera matrix. The parameter is similar to cameraMatrix1.
distCoeffs2
- Input/output lens distortion coefficients for the second camera. The parameter
is similar to distCoeffs1.
imageSize
- Size of the image used only to initialize the intrinsic camera matrix.
R
- Output rotation matrix between the 1st and the 2nd camera coordinate systems.
T
- Output translation vector between the coordinate systems of the cameras.
E
- Output essential matrix.
F
- Output fundamental matrix.
perViewErrors
- Output vector of the RMS re-projection error estimated for each pattern view.
flags
- Different flags that may be zero or a combination of the following values:
public static double stereoCalibrateExtended(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, Mat perViewErrors)
objectPoints
- Vector of vectors of the calibration pattern points.
imagePoints1
- Vector of vectors of the projections of the calibration pattern points,
observed by the first camera.
imagePoints2
- Vector of vectors of the projections of the calibration pattern points,
observed by the second camera.
cameraMatrix1
- Input/output first camera matrix:
\(\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\) , \(j = 0,\, 1\) . If
any of CALIB_USE_INTRINSIC_GUESS, CALIB_FIX_ASPECT_RATIO,
CALIB_FIX_INTRINSIC, or CALIB_FIX_FOCAL_LENGTH are specified, some or all of the
matrix components must be initialized. See the flags description for details.
distCoeffs1
- Input/output vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. The output vector length depends on the flags.
cameraMatrix2
- Input/output second camera matrix. The parameter is similar to cameraMatrix1.
distCoeffs2
- Input/output lens distortion coefficients for the second camera. The parameter
is similar to distCoeffs1.
imageSize
- Size of the image used only to initialize the intrinsic camera matrix.
R
- Output rotation matrix between the 1st and the 2nd camera coordinate systems.
T
- Output translation vector between the coordinate systems of the cameras.
E
- Output essential matrix.
F
- Output fundamental matrix.
perViewErrors
- Output vector of the RMS re-projection error estimated for each pattern view.
public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, int flags, TermCriteria criteria)
public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, int flags)
public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F)
public static double fisheye_calibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size image_size, Mat K, Mat D, List<Mat> rvecs, List<Mat> tvecs, int flags, TermCriteria criteria)
objectPoints
- vector of vectors of calibration pattern points in the calibration pattern
coordinate space.
imagePoints
- vector of vectors of the projections of calibration pattern points.
imagePoints.size() must be equal to objectPoints.size(), and imagePoints[i].size() must be equal
to objectPoints[i].size() for each i.
image_size
- Size of the image used only to initialize the intrinsic camera matrix.
K
- Output 3x3 floating-point camera matrix
\(A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) . If
fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
initialized before calling the function.
D
- Output vector of distortion coefficients \((k_1, k_2, k_3, k_4)\).
rvecs
- Output vector of rotation vectors (see Rodrigues) estimated for each pattern view.
That is, each k-th rotation vector together with the corresponding k-th translation vector (see
the next output parameter description) brings the calibration pattern from the model coordinate
space (in which object points are specified) to the world coordinate space, that is, the real
position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
tvecs
- Output vector of translation vectors estimated for each pattern view.
flags
- Different flags that may be zero or a combination of the following values:
criteria
- Termination criteria for the iterative optimization algorithm.
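A minimal sketch (the flag choice is illustrative; fisheye_CALIB_RECOMPUTE_EXTRINSIC and fisheye_CALIB_FIX_SKEW are commonly combined, assuming these constants are present in your binding version):
// assumes: import org.opencv.core.*; import org.opencv.calib3d.Calib3d; import java.util.*;
Mat K = new Mat(), D = new Mat();
List<Mat> rvecs = new ArrayList<>(), tvecs = new ArrayList<>();
int flags = Calib3d.fisheye_CALIB_RECOMPUTE_EXTRINSIC | Calib3d.fisheye_CALIB_FIX_SKEW;
double rms = Calib3d.fisheye_calibrate(objectPoints, imagePoints, new Size(1920, 1080),
        K, D, rvecs, tvecs, flags,
        new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 100, 1e-6));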
public static double fisheye_calibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size image_size, Mat K, Mat D, List<Mat> rvecs, List<Mat> tvecs, int flags)
objectPoints
- vector of vectors of calibration pattern points in the calibration pattern
coordinate space.
imagePoints
- vector of vectors of the projections of calibration pattern points.
imagePoints.size() must be equal to objectPoints.size(), and imagePoints[i].size() must be equal
to objectPoints[i].size() for each i.
image_size
- Size of the image used only to initialize the intrinsic camera matrix.
K
- Output 3x3 floating-point camera matrix
\(A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) . If
fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
initialized before calling the function.
D
- Output vector of distortion coefficients \((k_1, k_2, k_3, k_4)\).
rvecs
- Output vector of rotation vectors (see Rodrigues) estimated for each pattern view.
That is, each k-th rotation vector together with the corresponding k-th translation vector (see
the next output parameter description) brings the calibration pattern from the model coordinate
space (in which object points are specified) to the world coordinate space, that is, the real
position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
tvecs
- Output vector of translation vectors estimated for each pattern view.
flags
- Different flags that may be zero or a combination of the following values:
public static double fisheye_calibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size image_size, Mat K, Mat D, List<Mat> rvecs, List<Mat> tvecs)
objectPoints
- vector of vectors of calibration pattern points in the calibration pattern
coordinate space.
imagePoints
- vector of vectors of the projections of calibration pattern points.
imagePoints.size() must be equal to objectPoints.size(), and imagePoints[i].size() must be equal
to objectPoints[i].size() for each i.
image_size
- Size of the image used only to initialize the intrinsic camera matrix.
K
- Output 3x3 floating-point camera matrix
\(A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) . If
fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
initialized before calling the function.
D
- Output vector of distortion coefficients \((k_1, k_2, k_3, k_4)\).
rvecs
- Output vector of rotation vectors (see Rodrigues) estimated for each pattern view.
That is, each k-th rotation vector together with the corresponding k-th translation vector (see
the next output parameter description) brings the calibration pattern from the model coordinate
space (in which object points are specified) to the world coordinate space, that is, the real
position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
tvecs
- Output vector of translation vectors estimated for each pattern view.
public static double fisheye_stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat T, int flags, TermCriteria criteria)
objectPoints
- Vector of vectors of the calibration pattern points.
imagePoints1
- Vector of vectors of the projections of the calibration pattern points,
observed by the first camera.
imagePoints2
- Vector of vectors of the projections of the calibration pattern points,
observed by the second camera.
K1
- Input/output first camera matrix:
\(\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\) , \(j = 0,\, 1\) . If
any of fisheye::CALIB_USE_INTRINSIC_GUESS or fisheye::CALIB_FIX_INTRINSIC is specified,
some or all of the matrix components must be initialized.
D1
- Input/output vector of distortion coefficients \((k_1, k_2, k_3, k_4)\) of 4 elements.
K2
- Input/output second camera matrix. The parameter is similar to K1.
D2
- Input/output lens distortion coefficients for the second camera. The parameter is
similar to D1.
imageSize
- Size of the image used only to initialize the intrinsic camera matrix.
R
- Output rotation matrix between the 1st and the 2nd camera coordinate systems.
T
- Output translation vector between the coordinate systems of the cameras.
flags
- Different flags that may be zero or a combination of the following values:
criteria
- Termination criteria for the iterative optimization algorithm.
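A minimal sketch, assuming per-camera fisheye intrinsics K1, D1, K2, D2 were estimated beforehand (the fisheye_CALIB_FIX_INTRINSIC constant name is an assumption about your binding version):
// assumes: import org.opencv.core.*; import org.opencv.calib3d.Calib3d;
Mat R = new Mat(), T = new Mat();
double rms = Calib3d.fisheye_stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
        K1, D1, K2, D2, imageSize, R, T, Calib3d.fisheye_CALIB_FIX_INTRINSIC);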
public static double fisheye_stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat T, int flags)
objectPoints
- Vector of vectors of the calibration pattern points.
imagePoints1
- Vector of vectors of the projections of the calibration pattern points,
observed by the first camera.
imagePoints2
- Vector of vectors of the projections of the calibration pattern points,
observed by the second camera.
K1
- Input/output first camera matrix:
\(\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\) , \(j = 0,\, 1\) . If
any of fisheye::CALIB_USE_INTRINSIC_GUESS or fisheye::CALIB_FIX_INTRINSIC is specified,
some or all of the matrix components must be initialized.
D1
- Input/output vector of distortion coefficients \((k_1, k_2, k_3, k_4)\) of 4 elements.
K2
- Input/output second camera matrix. The parameter is similar to K1.
D2
- Input/output lens distortion coefficients for the second camera. The parameter is
similar to D1.
imageSize
- Size of the image used only to initialize the intrinsic camera matrix.
R
- Output rotation matrix between the 1st and the 2nd camera coordinate systems.
T
- Output translation vector between the coordinate systems of the cameras.
flags
- Different flags that may be zero or a combination of the following values:
public static double fisheye_stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat T)
objectPoints
- Vector of vectors of the calibration pattern points.
imagePoints1
- Vector of vectors of the projections of the calibration pattern points,
observed by the first camera.
imagePoints2
- Vector of vectors of the projections of the calibration pattern points,
observed by the second camera.
K1
- Input/output first camera matrix:
\(\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\) , \(j = 0,\, 1\) . If
any of fisheye::CALIB_USE_INTRINSIC_GUESS or fisheye::CALIB_FIX_INTRINSIC is specified,
some or all of the matrix components must be initialized.
D1
- Input/output vector of distortion coefficients \((k_1, k_2, k_3, k_4)\) of 4 elements.
K2
- Input/output second camera matrix. The parameter is similar to K1.
D2
- Input/output lens distortion coefficients for the second camera. The parameter is
similar to D1.
imageSize
- Size of the image used only to initialize the intrinsic camera matrix.
R
- Output rotation matrix between the 1st and the 2nd camera coordinate systems.
T
- Output translation vector between the coordinate systems of the cameras.
public static float rectify3Collinear(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat cameraMatrix3, Mat distCoeffs3, List<Mat> imgpt1, List<Mat> imgpt3, Size imageSize, Mat R12, Mat T12, Mat R13, Mat T13, Mat R1, Mat R2, Mat R3, Mat P1, Mat P2, Mat P3, Mat Q, double alpha, Size newImgSize, Rect roi1, Rect roi2, int flags)
public static int decomposeHomographyMat(Mat H, Mat K, List<Mat> rotations, List<Mat> translations, List<Mat> normals)
H
- The input homography matrix between two images.
K
- The input intrinsic camera calibration matrix.
rotations
- Array of rotation matrices.
translations
- Array of translation matrices.
normals
- Array of plane normal matrices.
This function extracts relative camera motion between two views observing a planar object from the
homography H induced by the plane. The intrinsic camera matrix K must also be provided. The function
may return up to four mathematical solution sets. At least two of the solutions may further be
invalidated if point correspondences are available, by applying the positive depth constraint (all
points must be in front of the camera). The decomposition method is described in detail in CITE: Malis.
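For example (a minimal sketch; H and K are assumed to come from findHomography and a prior calibration, respectively):
// assumes: import org.opencv.core.*; import org.opencv.calib3d.Calib3d; import java.util.*;
List<Mat> rotations = new ArrayList<>();
List<Mat> translations = new ArrayList<>();
List<Mat> normals = new ArrayList<>();
int n = Calib3d.decomposeHomographyMat(H, K, rotations, translations, normals);
for (int i = 0; i < n; i++)
    System.out.println("solution " + i + ":\n" + rotations.get(i).dump());
// Prune the candidates with the positive depth constraint when correspondences are available.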
public static int estimateAffine3D(Mat src, Mat dst, Mat out, Mat inliers, double ransacThreshold, double confidence)
src
- First input 3D point set containing \((X,Y,Z)\).
dst
- Second input 3D point set containing \((x,y,z)\).
out
- Output 3D affine transformation matrix \(3 \times 4\) of the form
\(
\begin{bmatrix}
a_{11} & a_{12} & a_{13} & b_1\\
a_{21} & a_{22} & a_{23} & b_2\\
a_{31} & a_{32} & a_{33} & b_3\\
\end{bmatrix}
\)
inliers
- Output vector indicating which points are inliers (1-inlier, 0-outlier).
ransacThreshold
- Maximum reprojection error in the RANSAC algorithm to consider a point as
an inlier.
confidence
- Confidence level, between 0 and 1, for the estimated transformation. Anything
between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
The function estimates an optimal 3D affine transformation between two 3D point sets using the
RANSAC algorithm.
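For example, a minimal self-contained sketch with a synthetic pure translation (the point set is made up for illustration):
// assumes: import org.opencv.core.*; import org.opencv.calib3d.Calib3d;
Point3[] srcPts = { new Point3(0, 0, 0), new Point3(1, 0, 0), new Point3(0, 1, 0),
                    new Point3(0, 0, 1), new Point3(1, 1, 0), new Point3(1, 0, 1) };
Point3[] dstPts = new Point3[srcPts.length];
for (int i = 0; i < srcPts.length; i++)   // shift every point by (1, 2, 3)
    dstPts[i] = new Point3(srcPts[i].x + 1, srcPts[i].y + 2, srcPts[i].z + 3);
Mat out = new Mat(), inliers = new Mat();
Calib3d.estimateAffine3D(new MatOfPoint3f(srcPts), new MatOfPoint3f(dstPts), out, inliers);
System.out.println(out.dump());           // expect identity rotation and b = (1, 2, 3)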
public static int estimateAffine3D(Mat src, Mat dst, Mat out, Mat inliers, double ransacThreshold)
src
- First input 3D point set containing \((X,Y,Z)\).
dst
- Second input 3D point set containing \((x,y,z)\).
out
- Output 3D affine transformation matrix \(3 \times 4\) of the form
\(
\begin{bmatrix}
a_{11} & a_{12} & a_{13} & b_1\\
a_{21} & a_{22} & a_{23} & b_2\\
a_{31} & a_{32} & a_{33} & b_3\\
\end{bmatrix}
\)
inliers
- Output vector indicating which points are inliers (1-inlier, 0-outlier).
ransacThreshold
- Maximum reprojection error in the RANSAC algorithm to consider a point as
an inlier.
The function estimates an optimal 3D affine transformation between two 3D point sets using the
RANSAC algorithm.
public static int estimateAffine3D(Mat src, Mat dst, Mat out, Mat inliers)
src
- First input 3D point set containing \((X,Y,Z)\).
dst
- Second input 3D point set containing \((x,y,z)\).
out
- Output 3D affine transformation matrix \(3 \times 4\) of the form
\(
\begin{bmatrix}
a_{11} & a_{12} & a_{13} & b_1\\
a_{21} & a_{22} & a_{23} & b_2\\
a_{31} & a_{32} & a_{33} & b_3\\
\end{bmatrix}
\)
inliers
- Output vector indicating which points are inliers (1-inlier, 0-outlier).
The function estimates an optimal 3D affine transformation between two 3D point sets using the
RANSAC algorithm.
public static int recoverPose(Mat E, Mat points1, Mat points2, Mat R, Mat t, double focal, Point pp, Mat mask)
E
- The input essential matrix.
points1
- Array of N 2D points from the first image. The point coordinates should be
floating-point (single or double precision).
points2
- Array of the second image points of the same size and format as points1.
R
- Recovered relative rotation.
t
- Recovered relative translation.
focal
- Focal length of the camera. Note that this function assumes that points1 and points2
are feature points from cameras with the same focal length and principal point.
pp
- principal point of the camera.
mask
- Input/output mask for inliers in points1 and points2.
If it is not empty, it marks inliers in points1 and points2 for the given essential
matrix E. Only these inliers will be used to recover the pose. The output mask contains only the
inliers that pass the cheirality check.
This function differs from the one above in that it computes the camera matrix from the focal
length and principal point:
\(K =
\begin{bmatrix}
f & 0 & x_{pp} \\
0 & f & y_{pp} \\
0 & 0 & 1
\end{bmatrix}\)
public static int recoverPose(Mat E, Mat points1, Mat points2, Mat R, Mat t, double focal, Point pp)
E
- The input essential matrix.
points1
- Array of N 2D points from the first image. The point coordinates should be
floating-point (single or double precision).
points2
- Array of the second image points of the same size and format as points1.
R
- Recovered relative rotation.
t
- Recovered relative translation.
focal
- Focal length of the camera. Note that this function assumes that points1 and points2
are feature points from cameras with the same focal length and principal point.
pp
- principal point of the camera.
This function differs from the one above in that it computes the camera matrix from the focal
length and principal point:
\(K =
\begin{bmatrix}
f & 0 & x_{pp} \\
0 & f & y_{pp} \\
0 & 0 & 1
\end{bmatrix}\)
public static int recoverPose(Mat E, Mat points1, Mat points2, Mat R, Mat t, double focal)
E
- The input essential matrix.
points1
- Array of N 2D points from the first image. The point coordinates should be
floating-point (single or double precision).
points2
- Array of the second image points of the same size and format as points1.
R
- Recovered relative rotation.
t
- Recovered relative translation.
focal
- Focal length of the camera. Note that this function assumes that points1 and points2
are feature points from cameras with the same focal length and principal point.
This function differs from the one above in that it computes the camera matrix from the focal
length and principal point:
\(K =
\begin{bmatrix}
f & 0 & x_{pp} \\
0 & f & y_{pp} \\
0 & 0 & 1
\end{bmatrix}\)
public static int recoverPose(Mat E, Mat points1, Mat points2, Mat R, Mat t)
E
- The input essential matrix.
points1
- Array of N 2D points from the first image. The point coordinates should be
floating-point (single or double precision).
points2
- Array of the second image points of the same size and format as points1.
R
- Recovered relative rotation.
t
- Recovered relative translation.
This function differs from the one above in that it computes the camera matrix from the focal
length and principal point:
\(K =
\begin{bmatrix}
f & 0 & x_{pp} \\
0 & f & y_{pp} \\
0 & 0 & 1
\end{bmatrix}\)
public static int recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat R, Mat t, Mat mask)
E
- The input essential matrix.
points1
- Array of N 2D points from the first image. The point coordinates should be
floating-point (single or double precision).
points2
- Array of the second image points of the same size and format as points1.
cameraMatrix
- Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
Note that this function assumes that points1 and points2 are feature points from cameras with the
same camera matrix.
R
- Recovered relative rotation.
t
- Recovered relative translation.
mask
- Input/output mask for inliers in points1 and points2.
If it is not empty, it marks inliers in points1 and points2 for the given essential
matrix E. Only these inliers will be used to recover the pose. The output mask contains only the
inliers that pass the cheirality check.
This function decomposes an essential matrix using decomposeEssentialMat and then verifies possible
pose hypotheses by doing the cheirality check. The cheirality check means that the
triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
This function can be used to process the output E and mask from findEssentialMat. In this scenario,
points1 and points2 are the same input as for findEssentialMat:
// Example. Estimation of fundamental matrix using the RANSAC algorithm
int point_count = 100;
vector<Point2f> points1(point_count);
vector<Point2f> points2(point_count);
// initialize the points here ...
for( int i = 0; i < point_count; i++ )
{
points1[i] = ...;
points2[i] = ...;
}
// camera matrix with both focal lengths = 1 and principal point = (0, 0)
Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
Mat E, R, t, mask;
E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
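A rough Java-bindings equivalent of the snippet above (the feature matching that fills points1 and points2 is assumed):
// assumes: import org.opencv.core.*; import org.opencv.calib3d.Calib3d;
MatOfPoint2f points1 = new MatOfPoint2f();   // fill with matched feature locations
MatOfPoint2f points2 = new MatOfPoint2f();
Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);
Mat R = new Mat(), t = new Mat(), mask = new Mat();
Mat E = Calib3d.findEssentialMat(points1, points2, cameraMatrix, Calib3d.RANSAC, 0.999, 1.0, mask);
int inlierCount = Calib3d.recoverPose(E, points1, points2, cameraMatrix, R, t, mask);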
public static int recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat R, Mat t)
E
- The input essential matrix.
points1
- Array of N 2D points from the first image. The point coordinates should be
floating-point (single or double precision).
points2
- Array of the second image points of the same size and format as points1.
cameraMatrix
- Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
Note that this function assumes that points1 and points2 are feature points from cameras with the
same camera matrix.
R
- Recovered relative rotation.
t
- Recovered relative translation.
This function decomposes an essential matrix using decomposeEssentialMat and then verifies possible
pose hypotheses by doing the cheirality check. The cheirality check means that the
triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
This function can be used to process the output E and mask from findEssentialMat. In this scenario,
points1 and points2 are the same input as for findEssentialMat:
// Example. Estimation of fundamental matrix using the RANSAC algorithm
int point_count = 100;
vector<Point2f> points1(point_count);
vector<Point2f> points2(point_count);
// initialize the points here ...
for( int i = 0; i < point_count; i++ )
{
points1[i] = ...;
points2[i] = ...;
}
// camera matrix with both focal lengths = 1 and principal point = (0, 0)
Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
Mat E, R, t, mask;
E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
public static int recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat R, Mat t, double distanceThresh, Mat mask, Mat triangulatedPoints)
E
- The input essential matrix.
points1
- Array of N 2D points from the first image. The point coordinates should be
floating-point (single or double precision).
points2
- Array of the second image points of the same size and format as points1.
cameraMatrix
- Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
Note that this function assumes that points1 and points2 are feature points from cameras with the
same camera matrix.
R
- Recovered relative rotation.
t
- Recovered relative translation.
distanceThresh
- threshold distance which is used to filter out far-away points (i.e. infinite points).
mask
- Input/output mask for inliers in points1 and points2.
If it is not empty, it marks inliers in points1 and points2 for the given essential
matrix E. Only these inliers will be used to recover the pose. The output mask contains only the
inliers that pass the cheirality check.
triangulatedPoints
- 3D points which were reconstructed by triangulation.
public static int recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat R, Mat t, double distanceThresh, Mat mask)
E
- The input essential matrix.
points1
- Array of N 2D points from the first image. The point coordinates should be
floating-point (single or double precision).
points2
- Array of the second image points of the same size and format as points1.
cameraMatrix
- Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
Note that this function assumes that points1 and points2 are feature points from cameras with the
same camera matrix.
R
- Recovered relative rotation.
t
- Recovered relative translation.
distanceThresh
- threshold distance which is used to filter out far-away points (i.e. infinite points).
mask
- Input/output mask for inliers in points1 and points2.
If it is not empty, it marks inliers in points1 and points2 for the given essential
matrix E. Only these inliers will be used to recover the pose. The output mask contains only the
inliers that pass the cheirality check.
public static int recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat R, Mat t, double distanceThresh)
E
- The input essential matrix.
points1
- Array of N 2D points from the first image. The point coordinates should be
floating-point (single or double precision).
points2
- Array of the second image points of the same size and format as points1.
cameraMatrix
- Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
Note that this function assumes that points1 and points2 are feature points from cameras with the
same camera matrix.
R
- Recovered relative rotation.
t
- Recovered relative translation.
distanceThresh
- threshold distance which is used to filter out far-away points (i.e. infinite points).
public static int solveP3P(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, int flags)
objectPoints
- Array of object points in the object coordinate space, 3x3 1-channel or
1x3/3x1 3-channel. vector<Point3f> can also be passed here.
imagePoints
- Array of corresponding image points, 3x2 1-channel or 1x3/3x1 2-channel.
vector<Point2f> can also be passed here.
cameraMatrix
- Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .
distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are
assumed.
rvecs
- Output rotation vectors (see REF: Rodrigues) that, together with tvecs, bring points from
the model coordinate system to the camera coordinate system. A P3P problem has up to 4 solutions.
tvecs
- Output translation vectors.
flags
- Method for solving a P3P problem:
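For example (a minimal sketch; the three correspondences and the stand-in camera matrix are made up for illustration):
// assumes: import org.opencv.core.*; import org.opencv.calib3d.Calib3d; import java.util.*;
MatOfPoint3f obj = new MatOfPoint3f(
        new Point3(0, 0, 0), new Point3(0.1, 0, 0), new Point3(0, 0.1, 0));
MatOfPoint2f img = new MatOfPoint2f(
        new Point(320, 240), new Point(400, 245), new Point(315, 300));
Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F); // stand-in intrinsics
List<Mat> rvecs = new ArrayList<>(), tvecs = new ArrayList<>();
int n = Calib3d.solveP3P(obj, img, cameraMatrix, new MatOfDouble(),
        rvecs, tvecs, Calib3d.SOLVEPNP_P3P);
// n is the number of candidate poses; a 4th point is typically used to disambiguate.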
public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, boolean useExtrinsicGuess, int flags, Mat rvec, Mat tvec, Mat reprojectionError)
objectPoints
- Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector<Point3f> can also be passed here.
imagePoints
- Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.
cameraMatrix
- Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .
distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are
assumed.
rvecs
- Vector of output rotation vectors (see REF: Rodrigues) that, together with tvecs, bring points from
the model coordinate system to the camera coordinate system.
tvecs
- Vector of output translation vectors.
useExtrinsicGuess
- Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.
flags
- Method for solving a PnP problem:
rvec
- Rotation vector used to initialize an iterative PnP refinement algorithm when flags is SOLVEPNP_ITERATIVE
and useExtrinsicGuess is set to true.
tvec
- Translation vector used to initialize an iterative PnP refinement algorithm when flags is SOLVEPNP_ITERATIVE
and useExtrinsicGuess is set to true.
reprojectionError
- Optional vector of reprojection errors, that is, the RMS error
(\( \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \)) between the input image points
and the 3D object points projected with the estimated pose.
The function computes the rotation (rvec) and the translation (tvec) vectors that allow transforming
a 3D point expressed in the world frame into the camera frame:
\(
\begin{align*}
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\hspace{0.2em} ^{c}\bf{M}_w
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix} \\
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\begin{bmatrix}
r_{11} & r_{12} & r_{13} & t_x \\
r_{21} & r_{22} & r_{23} & t_y \\
r_{31} & r_{32} & r_{33} & t_z \\
0 & 0 & 0 & 1
\end{bmatrix}
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix}
\end{align*}
\)
Note: with the SOLVEPNP_ITERATIVE method and useExtrinsicGuess=true, the minimum number of points
is 3 (3 points are sufficient to compute a pose, but there are up to 4 solutions). The initial
solution should be close to the global solution to converge.
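A minimal sketch (objectPoints, imagePoints, cameraMatrix and distCoeffs assumed prepared as described above; the SOLVEPNP_IPPE choice is an illustrative assumption for a planar target):
// assumes: import org.opencv.core.*; import org.opencv.calib3d.Calib3d; import java.util.*;
List<Mat> rvecs = new ArrayList<>(), tvecs = new ArrayList<>();
Mat reprojErr = new Mat();
int n = Calib3d.solvePnPGeneric(objectPoints, imagePoints, cameraMatrix, distCoeffs,
        rvecs, tvecs, false, Calib3d.SOLVEPNP_IPPE, new Mat(), new Mat(), reprojErr);
// Pick the candidate with the smallest entry in reprojErr.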
public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, boolean useExtrinsicGuess, int flags, Mat rvec, Mat tvec)
objectPoints
- Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector<Point3f> can also be passed here.
imagePoints
- Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.
cameraMatrix
- Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .
distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are
assumed.
rvecs
- Vector of output rotation vectors (see REF: Rodrigues) that, together with tvecs, bring points from
the model coordinate system to the camera coordinate system.
tvecs
- Vector of output translation vectors.
useExtrinsicGuess
- Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.
flags
- Method for solving a PnP problem:
rvec
- Rotation vector used to initialize an iterative PnP refinement algorithm when flags is SOLVEPNP_ITERATIVE
and useExtrinsicGuess is set to true.
tvec
- Translation vector used to initialize an iterative PnP refinement algorithm when flags is SOLVEPNP_ITERATIVE
and useExtrinsicGuess is set to true.
The function computes the rotation (rvec) and the translation (tvec) vectors that allow transforming
a 3D point expressed in the world frame into the camera frame:
\(
\begin{align*}
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\hspace{0.2em} ^{c}\bf{M}_w
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix} \\
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\begin{bmatrix}
r_{11} & r_{12} & r_{13} & t_x \\
r_{21} & r_{22} & r_{23} & t_y \\
r_{31} & r_{32} & r_{33} & t_z \\
0 & 0 & 0 & 1
\end{bmatrix}
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix}
\end{align*}
\)
Note: with the SOLVEPNP_ITERATIVE method and useExtrinsicGuess=true, the minimum number of points
is 3 (3 points are sufficient to compute a pose, but there are up to 4 solutions). The initial
solution should be close to the global solution to converge.
public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, boolean useExtrinsicGuess, int flags, Mat rvec)
objectPoints
- Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector<Point3f> can also be passed here.
imagePoints
- Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.
cameraMatrix
- Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .
distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are
assumed.
rvecs
- Vector of output rotation vectors (see REF: Rodrigues) that, together with tvecs, bring points from
the model coordinate system to the camera coordinate system.
tvecs
- Vector of output translation vectors.
useExtrinsicGuess
- Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.
flags
- Method for solving a PnP problem:
rvec
- Rotation vector used to initialize an iterative PnP refinement algorithm when flags is SOLVEPNP_ITERATIVE
and useExtrinsicGuess is set to true.
The function computes the rotation (rvec) and the translation (tvec) vectors that allow transforming
a 3D point expressed in the world frame into the camera frame:
\(
\begin{align*}
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\hspace{0.2em} ^{c}\bf{M}_w
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix} \\
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\begin{bmatrix}
r_{11} & r_{12} & r_{13} & t_x \\
r_{21} & r_{22} & r_{23} & t_y \\
r_{31} & r_{32} & r_{33} & t_z \\
0 & 0 & 0 & 1
\end{bmatrix}
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix}
\end{align*}
\)
Note: with the SOLVEPNP_ITERATIVE method and useExtrinsicGuess=true, the minimum number of points
is 3 (3 points are sufficient to compute a pose, but there are up to 4 solutions). The initial
solution should be close to the global solution to converge.
public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, boolean useExtrinsicGuess, int flags)
objectPoints
- Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector<Point3f> can also be passed here.
imagePoints
- Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.
cameraMatrix
- Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .
distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are
assumed.
rvecs
- Vector of output rotation vectors (see REF: Rodrigues) that, together with tvecs, bring points from
the model coordinate system to the camera coordinate system.
tvecs
- Vector of output translation vectors.
useExtrinsicGuess
- Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.
flags
- Method for solving a PnP problem:
The function computes the rotation (rvec) and the translation (tvec) vectors that allow transforming
a 3D point expressed in the world frame into the camera frame:
\(
\begin{align*}
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\hspace{0.2em} ^{c}\bf{M}_w
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix} \\
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\begin{bmatrix}
r_{11} & r_{12} & r_{13} & t_x \\
r_{21} & r_{22} & r_{23} & t_y \\
r_{31} & r_{32} & r_{33} & t_z \\
0 & 0 & 0 & 1
\end{bmatrix}
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix}
\end{align*}
\)
Note: with the SOLVEPNP_ITERATIVE method and useExtrinsicGuess=true, the minimum number of points
is 3 (3 points are sufficient to compute a pose, but there are up to 4 solutions). The initial
solution should be close to the global solution to converge.
public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, boolean useExtrinsicGuess)
objectPoints - Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector<Point3f> can also be passed here.
imagePoints - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.
cameraMatrix - Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .
distCoeffs - Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
rvecs - Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, bring points from
the model coordinate system to the camera coordinate system.
tvecs - Vector of output translation vectors.
useExtrinsicGuess - Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.
The function estimates the rotation (rvec) and the translation (tvec) vectors that allow transforming
a 3D point expressed in the world frame into the camera frame:
\(
\begin{align*}
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\hspace{0.2em} ^{c}\bf{M}_w
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix} \\
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\begin{bmatrix}
r_{11} & r_{12} & r_{13} & t_x \\
r_{21} & r_{22} & r_{23} & t_y \\
r_{31} & r_{32} & r_{33} & t_z \\
0 & 0 & 0 & 1
\end{bmatrix}
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix}
\end{align*}
\)
Note:
With SOLVEPNP_ITERATIVE method and useExtrinsicGuess=true, the minimum number of points is 3 (3 points
are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
global solution to converge.
public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs)
objectPoints - Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector<Point3f> can also be passed here.
imagePoints - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.
cameraMatrix - Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .
distCoeffs - Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
rvecs - Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, bring points from
the model coordinate system to the camera coordinate system.
tvecs - Vector of output translation vectors.
The function estimates the rotation (rvec) and the translation (tvec) vectors that allow transforming
a 3D point expressed in the world frame into the camera frame:
\(
\begin{align*}
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\hspace{0.2em} ^{c}\bf{M}_w
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix} \\
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\begin{bmatrix}
r_{11} & r_{12} & r_{13} & t_x \\
r_{21} & r_{22} & r_{23} & t_y \\
r_{31} & r_{32} & r_{33} & t_z \\
0 & 0 & 0 & 1
\end{bmatrix}
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix}
\end{align*}
\)
Note:
With SOLVEPNP_ITERATIVE method and useExtrinsicGuess=true, the minimum number of points is 3 (3 points
are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
global solution to converge.
public static void Rodrigues(Mat src, Mat dst, Mat jacobian)
src - Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).
dst - Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
jacobian - Optional output Jacobian matrix, 3x9 or 9x3, which is a matrix of partial
derivatives of the output array components with respect to the input array components.
\(\begin{array}{l} \theta \leftarrow norm(r) \\ r \leftarrow r/ \theta \\ R = \cos{\theta} I + (1- \cos{\theta} ) r r^T + \sin{\theta} \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}\)
The inverse transformation can also be done easily, since
\(\sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}\)
A rotation vector is a convenient and most compact representation of a rotation matrix (since any
rotation matrix has just 3 degrees of freedom). The representation is used in the global 3D geometry
optimization procedures like calibrateCamera, stereoCalibrate, or solvePnP.
public static void Rodrigues(Mat src, Mat dst)
src - Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).
dst - Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
\(\begin{array}{l} \theta \leftarrow norm(r) \\ r \leftarrow r/ \theta \\ R = \cos{\theta} I + (1- \cos{\theta} ) r r^T + \sin{\theta} \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}\)
The inverse transformation can also be done easily, since
\(\sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}\)
A rotation vector is a convenient and most compact representation of a rotation matrix (since any
rotation matrix has just 3 degrees of freedom). The representation is used in the global 3D geometry
optimization procedures like calibrateCamera, stereoCalibrate, or solvePnP.
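For example, a short Java sketch of the round trip (the rotation value is illustrative):
    // assumes: import org.opencv.core.*; import org.opencv.calib3d.Calib3d;
    Mat rvec = new Mat(3, 1, CvType.CV_64F);
    rvec.put(0, 0, 0, 0, Math.PI / 2);   // 90-degree rotation about the Z axis
    Mat R = new Mat();
    Calib3d.Rodrigues(rvec, R);          // rotation vector -> 3x3 rotation matrix
    Mat rvecBack = new Mat();
    Calib3d.Rodrigues(R, rvecBack);      // 3x3 rotation matrix -> rotation vector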
public static void calibrateHandEye(List<Mat> R_gripper2base, List<Mat> t_gripper2base, List<Mat> R_target2cam, List<Mat> t_target2cam, Mat R_cam2gripper, Mat t_cam2gripper, int method)
R_gripper2base - Rotation part extracted from the homogeneous matrix that transforms a point
expressed in the gripper frame to the robot base frame (\(_{}^{b}\textrm{T}_g\)).
This is a vector (vector<Mat>) that contains the rotation matrices for all the transformations
from gripper frame to robot base frame.
t_gripper2base - Translation part extracted from the homogeneous matrix that transforms a point
expressed in the gripper frame to the robot base frame (\(_{}^{b}\textrm{T}_g\)).
This is a vector (vector<Mat>) that contains the translation vectors for all the transformations
from gripper frame to robot base frame.
R_target2cam - Rotation part extracted from the homogeneous matrix that transforms a point
expressed in the target frame to the camera frame (\(_{}^{c}\textrm{T}_t\)).
This is a vector (vector<Mat>) that contains the rotation matrices for all the transformations
from calibration target frame to camera frame.
t_target2cam - Translation part extracted from the homogeneous matrix that transforms a point
expressed in the target frame to the camera frame (\(_{}^{c}\textrm{T}_t\)).
This is a vector (vector<Mat>) that contains the translation vectors for all the transformations
from calibration target frame to camera frame.
R_cam2gripper - Estimated rotation part extracted from the homogeneous matrix that transforms a point
expressed in the camera frame to the gripper frame (\(_{}^{g}\textrm{T}_c\)).
t_cam2gripper - Estimated translation part extracted from the homogeneous matrix that transforms a point
expressed in the camera frame to the gripper frame (\(_{}^{g}\textrm{T}_c\)).
method - One of the implemented Hand-Eye calibration methods, see cv::HandEyeCalibrationMethod.
The function performs the Hand-Eye calibration using various methods. One approach consists in estimating the
rotation first and then the translation (separable solutions); the implemented methods are listed in cv::HandEyeCalibrationMethod.
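A hedged Java sketch of one way to call it, assuming the per-pose rotations and translations have already been collected; CALIB_HAND_EYE_TSAI is one of the available method constants:
    // assumes: import java.util.*; import org.opencv.core.*; import org.opencv.calib3d.Calib3d;
    List<Mat> R_gripper2base = new ArrayList<>(), t_gripper2base = new ArrayList<>();
    List<Mat> R_target2cam = new ArrayList<>(), t_target2cam = new ArrayList<>();
    // ... fill the four lists with one 3x3 rotation and one 3x1 translation per robot pose ...
    Mat R_cam2gripper = new Mat(), t_cam2gripper = new Mat();
    Calib3d.calibrateHandEye(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam,
            R_cam2gripper, t_cam2gripper, Calib3d.CALIB_HAND_EYE_TSAI);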
public static void calibrateHandEye(List<Mat> R_gripper2base, List<Mat> t_gripper2base, List<Mat> R_target2cam, List<Mat> t_target2cam, Mat R_cam2gripper, Mat t_cam2gripper)
R_gripper2base - Rotation part extracted from the homogeneous matrix that transforms a point
expressed in the gripper frame to the robot base frame (\(_{}^{b}\textrm{T}_g\)).
This is a vector (vector<Mat>) that contains the rotation matrices for all the transformations
from gripper frame to robot base frame.
t_gripper2base - Translation part extracted from the homogeneous matrix that transforms a point
expressed in the gripper frame to the robot base frame (\(_{}^{b}\textrm{T}_g\)).
This is a vector (vector<Mat>) that contains the translation vectors for all the transformations
from gripper frame to robot base frame.
R_target2cam - Rotation part extracted from the homogeneous matrix that transforms a point
expressed in the target frame to the camera frame (\(_{}^{c}\textrm{T}_t\)).
This is a vector (vector<Mat>) that contains the rotation matrices for all the transformations
from calibration target frame to camera frame.
t_target2cam - Translation part extracted from the homogeneous matrix that transforms a point
expressed in the target frame to the camera frame (\(_{}^{c}\textrm{T}_t\)).
This is a vector (vector<Mat>) that contains the translation vectors for all the transformations
from calibration target frame to camera frame.
R_cam2gripper - Estimated rotation part extracted from the homogeneous matrix that transforms a point
expressed in the camera frame to the gripper frame (\(_{}^{g}\textrm{T}_c\)).
t_cam2gripper - Estimated translation part extracted from the homogeneous matrix that transforms a point
expressed in the camera frame to the gripper frame (\(_{}^{g}\textrm{T}_c\)).
The function performs the Hand-Eye calibration using various methods. One approach consists in estimating the
rotation first and then the translation (separable solutions); the implemented methods are listed in cv::HandEyeCalibrationMethod.
public static void calibrationMatrixValues(Mat cameraMatrix, Size imageSize, double apertureWidth, double apertureHeight, double[] fovx, double[] fovy, double[] focalLength, Point principalPoint, double[] aspectRatio)
cameraMatrix - Input camera matrix that can be estimated by calibrateCamera or
stereoCalibrate.
imageSize - Input image size in pixels.
apertureWidth - Physical width in mm of the sensor.
apertureHeight - Physical height in mm of the sensor.
fovx - Output field of view in degrees along the horizontal sensor axis.
fovy - Output field of view in degrees along the vertical sensor axis.
focalLength - Focal length of the lens in mm.
principalPoint - Principal point in mm.
aspectRatio - \(f_y/f_x\)
The function computes various useful camera characteristics from the previously estimated camera
matrix.
Note:
Do keep in mind that the unit 'mm' stands for whatever unit of measure one chooses for
the chessboard pitch (it can thus be any value).
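A small Java sketch; the 6.4 x 4.8 mm sensor size and 640x480 image size are hypothetical, and cameraMatrix is assumed to come from a previous calibration:
    // assumes: import org.opencv.core.*; import org.opencv.calib3d.Calib3d;
    double[] fovx = new double[1], fovy = new double[1];
    double[] focalLength = new double[1], aspectRatio = new double[1];
    Point principalPoint = new Point();
    Calib3d.calibrationMatrixValues(cameraMatrix, new Size(640, 480),
            6.4, 4.8,                       // hypothetical sensor width/height in mm
            fovx, fovy, focalLength, principalPoint, aspectRatio);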
public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2, Mat dt3dr1, Mat dt3dt1, Mat dt3dr2, Mat dt3dt2)
rvec1 - First rotation vector.
tvec1 - First translation vector.
rvec2 - Second rotation vector.
tvec2 - Second translation vector.
rvec3 - Output rotation vector of the superposition.
tvec3 - Output translation vector of the superposition.
dr3dr1 - Optional output derivative of rvec3 with regard to rvec1.
dr3dt1 - Optional output derivative of rvec3 with regard to tvec1.
dr3dr2 - Optional output derivative of rvec3 with regard to rvec2.
dr3dt2 - Optional output derivative of rvec3 with regard to tvec2.
dt3dr1 - Optional output derivative of tvec3 with regard to rvec1.
dt3dt1 - Optional output derivative of tvec3 with regard to tvec1.
dt3dr2 - Optional output derivative of tvec3 with regard to rvec2.
dt3dt2 - Optional output derivative of tvec3 with regard to tvec2.
The functions compute:
\(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
\(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
Also, the functions can compute the derivatives of the output vectors with regard to the input
vectors (see matMulDeriv). The functions are used inside stereoCalibrate but can also be used in
your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
function that contains a matrix multiplication.
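For instance, composing a rotation followed by a translation with the derivative-free overload (the values are illustrative):
    // assumes: import org.opencv.core.*; import org.opencv.calib3d.Calib3d;
    Mat rvec1 = Mat.zeros(3, 1, CvType.CV_64F), tvec1 = Mat.zeros(3, 1, CvType.CV_64F);
    Mat rvec2 = Mat.zeros(3, 1, CvType.CV_64F), tvec2 = Mat.zeros(3, 1, CvType.CV_64F);
    rvec1.put(0, 0, 0, 0, Math.PI / 4);  // first transform: 45 degrees about Z
    tvec2.put(0, 0, 0.1, 0, 0);          // second transform: 0.1 shift along X
    Mat rvec3 = new Mat(), tvec3 = new Mat();
    Calib3d.composeRT(rvec1, tvec1, rvec2, tvec2, rvec3, tvec3);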
public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2, Mat dt3dr1, Mat dt3dt1, Mat dt3dr2)
rvec1 - First rotation vector.
tvec1 - First translation vector.
rvec2 - Second rotation vector.
tvec2 - Second translation vector.
rvec3 - Output rotation vector of the superposition.
tvec3 - Output translation vector of the superposition.
dr3dr1 - Optional output derivative of rvec3 with regard to rvec1.
dr3dt1 - Optional output derivative of rvec3 with regard to tvec1.
dr3dr2 - Optional output derivative of rvec3 with regard to rvec2.
dr3dt2 - Optional output derivative of rvec3 with regard to tvec2.
dt3dr1 - Optional output derivative of tvec3 with regard to rvec1.
dt3dt1 - Optional output derivative of tvec3 with regard to tvec1.
dt3dr2 - Optional output derivative of tvec3 with regard to rvec2.
The functions compute:
\(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
\(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
Also, the functions can compute the derivatives of the output vectors with regard to the input
vectors (see matMulDeriv). The functions are used inside stereoCalibrate but can also be used in
your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
function that contains a matrix multiplication.
public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2, Mat dt3dr1, Mat dt3dt1)
rvec1 - First rotation vector.
tvec1 - First translation vector.
rvec2 - Second rotation vector.
tvec2 - Second translation vector.
rvec3 - Output rotation vector of the superposition.
tvec3 - Output translation vector of the superposition.
dr3dr1 - Optional output derivative of rvec3 with regard to rvec1.
dr3dt1 - Optional output derivative of rvec3 with regard to tvec1.
dr3dr2 - Optional output derivative of rvec3 with regard to rvec2.
dr3dt2 - Optional output derivative of rvec3 with regard to tvec2.
dt3dr1 - Optional output derivative of tvec3 with regard to rvec1.
dt3dt1 - Optional output derivative of tvec3 with regard to tvec1.
The functions compute:
\(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
\(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
Also, the functions can compute the derivatives of the output vectors with regard to the input
vectors (see matMulDeriv). The functions are used inside stereoCalibrate but can also be used in
your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
function that contains a matrix multiplication.
public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2, Mat dt3dr1)
rvec1 - First rotation vector.
tvec1 - First translation vector.
rvec2 - Second rotation vector.
tvec2 - Second translation vector.
rvec3 - Output rotation vector of the superposition.
tvec3 - Output translation vector of the superposition.
dr3dr1 - Optional output derivative of rvec3 with regard to rvec1.
dr3dt1 - Optional output derivative of rvec3 with regard to tvec1.
dr3dr2 - Optional output derivative of rvec3 with regard to rvec2.
dr3dt2 - Optional output derivative of rvec3 with regard to tvec2.
dt3dr1 - Optional output derivative of tvec3 with regard to rvec1.
The functions compute:
\(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
\(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
Also, the functions can compute the derivatives of the output vectors with regard to the input
vectors (see matMulDeriv). The functions are used inside stereoCalibrate but can also be used in
your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
function that contains a matrix multiplication.
public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2)
rvec1 - First rotation vector.
tvec1 - First translation vector.
rvec2 - Second rotation vector.
tvec2 - Second translation vector.
rvec3 - Output rotation vector of the superposition.
tvec3 - Output translation vector of the superposition.
dr3dr1 - Optional output derivative of rvec3 with regard to rvec1.
dr3dt1 - Optional output derivative of rvec3 with regard to tvec1.
dr3dr2 - Optional output derivative of rvec3 with regard to rvec2.
dr3dt2 - Optional output derivative of rvec3 with regard to tvec2.
The functions compute:
\(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
\(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
Also, the functions can compute the derivatives of the output vectors with regard to the input
vectors (see matMulDeriv). The functions are used inside stereoCalibrate but can also be used in
your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
function that contains a matrix multiplication.
public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2)
rvec1 - First rotation vector.
tvec1 - First translation vector.
rvec2 - Second rotation vector.
tvec2 - Second translation vector.
rvec3 - Output rotation vector of the superposition.
tvec3 - Output translation vector of the superposition.
dr3dr1 - Optional output derivative of rvec3 with regard to rvec1.
dr3dt1 - Optional output derivative of rvec3 with regard to tvec1.
dr3dr2 - Optional output derivative of rvec3 with regard to rvec2.
The functions compute:
\(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
\(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
Also, the functions can compute the derivatives of the output vectors with regard to the input
vectors (see matMulDeriv). The functions are used inside stereoCalibrate but can also be used in
your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
function that contains a matrix multiplication.
public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1)
rvec1 - First rotation vector.
tvec1 - First translation vector.
rvec2 - Second rotation vector.
tvec2 - Second translation vector.
rvec3 - Output rotation vector of the superposition.
tvec3 - Output translation vector of the superposition.
dr3dr1 - Optional output derivative of rvec3 with regard to rvec1.
dr3dt1 - Optional output derivative of rvec3 with regard to tvec1.
The functions compute:
\(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
\(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
Also, the functions can compute the derivatives of the output vectors with regard to the input
vectors (see matMulDeriv). The functions are used inside stereoCalibrate but can also be used in
your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
function that contains a matrix multiplication.
public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1)
rvec1 - First rotation vector.
tvec1 - First translation vector.
rvec2 - Second rotation vector.
tvec2 - Second translation vector.
rvec3 - Output rotation vector of the superposition.
tvec3 - Output translation vector of the superposition.
dr3dr1 - Optional output derivative of rvec3 with regard to rvec1.
The functions compute:
\(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
\(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
Also, the functions can compute the derivatives of the output vectors with regard to the input
vectors (see matMulDeriv). The functions are used inside stereoCalibrate but can also be used in
your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
function that contains a matrix multiplication.
public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3)
rvec1 - First rotation vector.
tvec1 - First translation vector.
rvec2 - Second rotation vector.
tvec2 - Second translation vector.
rvec3 - Output rotation vector of the superposition.
tvec3 - Output translation vector of the superposition.
The functions compute:
\(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
\(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
Also, the functions can compute the derivatives of the output vectors with regard to the input
vectors (see matMulDeriv). The functions are used inside stereoCalibrate but can also be used in
your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
function that contains a matrix multiplication.
public static void computeCorrespondEpilines(Mat points, int whichImage, Mat F, Mat lines)
points - Input points. \(N \times 1\) or \(1 \times N\) matrix of type CV_32FC2 or
vector<Point2f>.
whichImage - Index of the image (1 or 2) that contains the points.
F - Fundamental matrix that can be estimated using findFundamentalMat or stereoRectify.
lines - Output vector of the epipolar lines corresponding to the points in the other image.
Each line \(ax + by + c=0\) is encoded by 3 numbers \((a, b, c)\).
For every point in one of the two images of a stereo pair, the function finds the equation of the
corresponding epipolar line in the other image.
From the fundamental matrix definition (see findFundamentalMat ), line \(l^{(2)}_i\) in the second
image for the point \(p^{(1)}_i\) in the first image (when whichImage=1 ) is computed as:
\(l^{(2)}_i = F p^{(1)}_i\)
And vice versa, when whichImage=2, \(l^{(1)}_i\) is computed from \(p^{(2)}_i\) as:
\(l^{(1)}_i = F^T p^{(2)}_i\)
Line coefficients are defined up to a scale. They are normalized so that \(a_i^2+b_i^2=1\).
public static void convertPointsFromHomogeneous(Mat src, Mat dst)
src - Input vector of N-dimensional points.
dst - Output vector of N-1-dimensional points.
The function converts points from homogeneous to Euclidean space using perspective projection. That is,
each point (x1, x2, ..., x(n-1), xn) is converted to (x1/xn, x2/xn, ..., x(n-1)/xn). When xn=0, the
output point coordinates will be (0,0,0,...).
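A round-trip Java sketch using this function together with convertPointsToHomogeneous (documented next); the point values are illustrative:
    // assumes: import org.opencv.core.*; import org.opencv.calib3d.Calib3d;
    Mat pts = new MatOfPoint2f(new Point(2, 3), new Point(-1, 5));
    Mat ptsHom = new Mat();
    Calib3d.convertPointsToHomogeneous(pts, ptsHom);       // (x, y) -> (x, y, 1)
    Mat ptsBack = new Mat();
    Calib3d.convertPointsFromHomogeneous(ptsHom, ptsBack); // (x, y, w) -> (x/w, y/w)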
public static void convertPointsToHomogeneous(Mat src, Mat dst)
src - Input vector of N-dimensional points.
dst - Output vector of N+1-dimensional points.
The function converts points from Euclidean to homogeneous space by appending 1's to the tuple of
point coordinates. That is, each point (x1, x2, ..., xn) is converted to (x1, x2, ..., xn, 1).
public static void correctMatches(Mat F, Mat points1, Mat points2, Mat newPoints1, Mat newPoints2)
F - 3x3 fundamental matrix.
points1 - 1xN array containing the first set of points.
points2 - 1xN array containing the second set of points.
newPoints1 - The optimized points1.
newPoints2 - The optimized points2.
The function implements the Optimal Triangulation Method (see Multiple View Geometry for details).
For each given point correspondence points1[i] <-> points2[i], and a fundamental matrix F, it
computes the corrected correspondences newPoints1[i] <-> newPoints2[i] that minimize the geometric
error \(d(points1[i], newPoints1[i])^2 + d(points2[i],newPoints2[i])^2\) (where \(d(a,b)\) is the
geometric distance between points \(a\) and \(b\) ) subject to the epipolar constraint
\(newPoints2^T * F * newPoints1 = 0\).
public static void decomposeEssentialMat(Mat E, Mat R1, Mat R2, Mat t)
E - The input essential matrix.
R1 - One possible rotation matrix.
R2 - Another possible rotation matrix.
t - One possible translation.
This function decomposes an essential matrix E using SVD decomposition CITE: HartleyZ00 . In general, 4
possible poses exist for a given E. They are \([R_1, t]\), \([R_1, -t]\), \([R_2, t]\), \([R_2, -t]\). By
decomposing E, you can only get the direction of the translation, so the function returns unit t.
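A short Java sketch, assuming E was estimated beforehand (for example with findEssentialMat):
    // assumes: import org.opencv.core.*; import org.opencv.calib3d.Calib3d; Mat E already computed
    Mat R1 = new Mat(), R2 = new Mat(), t = new Mat();
    Calib3d.decomposeEssentialMat(E, R1, R2, t);
    // Candidate poses: [R1, t], [R1, -t], [R2, t], [R2, -t]; the physically valid one is
    // the pose for which triangulated points lie in front of both cameras.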
public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect, Mat rotMatrixX, Mat rotMatrixY, Mat rotMatrixZ, Mat eulerAngles)
projMatrix - 3x4 input projection matrix P.
cameraMatrix - Output 3x3 camera matrix K.
rotMatrix - Output 3x3 external rotation matrix R.
transVect - Output 4x1 translation vector T.
rotMatrixX - Optional 3x3 rotation matrix around x-axis.
rotMatrixY - Optional 3x3 rotation matrix around y-axis.
rotMatrixZ - Optional 3x3 rotation matrix around z-axis.
eulerAngles - Optional three-element vector containing three Euler angles of rotation in
degrees.
The function computes a decomposition of a projection matrix into a calibration and a rotation
matrix and the position of a camera.
It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
be used in OpenGL. Note, there is always more than one sequence of rotations about the three
principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . The returned
three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
The function is based on RQDecomp3x3.
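A minimal Java sketch, assuming P is an existing 3x4 CV_64F projection matrix:
    // assumes: import org.opencv.core.*; import org.opencv.calib3d.Calib3d; Mat P already available
    Mat K = new Mat(), R = new Mat(), T = new Mat();
    Calib3d.decomposeProjectionMatrix(P, K, R, T);
    // K: 3x3 camera matrix, R: 3x3 rotation matrix, T: 4x1 translation vector, as described above.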
public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect, Mat rotMatrixX, Mat rotMatrixY, Mat rotMatrixZ)
projMatrix - 3x4 input projection matrix P.
cameraMatrix - Output 3x3 camera matrix K.
rotMatrix - Output 3x3 external rotation matrix R.
transVect - Output 4x1 translation vector T.
rotMatrixX - Optional 3x3 rotation matrix around x-axis.
rotMatrixY - Optional 3x3 rotation matrix around y-axis.
rotMatrixZ - Optional 3x3 rotation matrix around z-axis.
The function computes a decomposition of a projection matrix into a calibration and a rotation
matrix and the position of a camera.
It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
be used in OpenGL. Note, there is always more than one sequence of rotations about the three
principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . The returned
three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
The function is based on RQDecomp3x3.
public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect, Mat rotMatrixX, Mat rotMatrixY)
projMatrix - 3x4 input projection matrix P.
cameraMatrix - Output 3x3 camera matrix K.
rotMatrix - Output 3x3 external rotation matrix R.
transVect - Output 4x1 translation vector T.
rotMatrixX - Optional 3x3 rotation matrix around x-axis.
rotMatrixY - Optional 3x3 rotation matrix around y-axis.
The function computes a decomposition of a projection matrix into a calibration and a rotation
matrix and the position of a camera.
It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
be used in OpenGL. Note, there is always more than one sequence of rotations about the three
principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . The returned
three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
The function is based on RQDecomp3x3.
public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect, Mat rotMatrixX)
projMatrix - 3x4 input projection matrix P.
cameraMatrix - Output 3x3 camera matrix K.
rotMatrix - Output 3x3 external rotation matrix R.
transVect - Output 4x1 translation vector T.
rotMatrixX - Optional 3x3 rotation matrix around x-axis.
The function computes a decomposition of a projection matrix into a calibration and a rotation
matrix and the position of a camera.
It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
be used in OpenGL. Note, there is always more than one sequence of rotations about the three
principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . The returned
three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
The function is based on RQDecomp3x3.
public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect)
projMatrix - 3x4 input projection matrix P.
cameraMatrix - Output 3x3 camera matrix K.
rotMatrix - Output 3x3 external rotation matrix R.
transVect - Output 4x1 translation vector T.
The function computes a decomposition of a projection matrix into a calibration and a rotation
matrix and the position of a camera.
It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
be used in OpenGL. Note, there is always more than one sequence of rotations about the three
principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . The returned
three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
The function is based on RQDecomp3x3.
public static void drawChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners, boolean patternWasFound)
image - Destination image. It must be an 8-bit color image.
patternSize - Number of inner corners per a chessboard row and column
(patternSize = cv::Size(points_per_row,points_per_column)).
corners - Array of detected corners, the output of findChessboardCorners.
patternWasFound - Parameter indicating whether the complete board was found or not. The
return value of findChessboardCorners should be passed here.
The function draws individual chessboard corners detected either as red circles if the board was not
found, or as colored corners connected with lines if the board was found.
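A typical Java pairing with findChessboardCorners, assuming a grayscale detection image and a color image to draw on; the 9x6 pattern size is illustrative:
    // assumes: import org.opencv.core.*; import org.opencv.calib3d.Calib3d; gray and colorImage exist
    Size patternSize = new Size(9, 6);                 // inner corners per row and column
    MatOfPoint2f corners = new MatOfPoint2f();
    boolean found = Calib3d.findChessboardCorners(gray, patternSize, corners);
    Calib3d.drawChessboardCorners(colorImage, patternSize, corners, found);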
public static void drawFrameAxes(Mat image, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, float length, int thickness)
image - Input/output image. It must have 1 or 3 channels. The number of channels is not altered.
cameraMatrix - Input 3x3 floating-point matrix of camera intrinsic parameters.
\(A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\)
distCoeffs - Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is empty, the zero distortion coefficients are assumed.
rvec - Rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system.
tvec - Translation vector.
length - Length of the painted axes in the same unit as tvec (usually in meters).
thickness - Line thickness of the painted axes.
This function draws the axes of the world/object coordinate system w.r.t. the camera frame.
OX is drawn in red, OY in green and OZ in blue.
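For example, after a successful pose estimation the result can be visualized like this; the 0.05 axis length and the surrounding variables are assumptions:
    // assumes: image, cameraMatrix, distCoeffs, rvec, tvec from a previous solvePnP-style call
    Calib3d.drawFrameAxes(image, cameraMatrix, distCoeffs, rvec, tvec, 0.05f, 2);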
public static void drawFrameAxes(Mat image, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, float length)
image - Input/output image. It must have 1 or 3 channels. The number of channels is not altered.
cameraMatrix - Input 3x3 floating-point matrix of camera intrinsic parameters.
\(A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\)
distCoeffs - Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is empty, the zero distortion coefficients are assumed.
rvec - Rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system.
tvec - Translation vector.
length - Length of the painted axes in the same unit as tvec (usually in meters).
This function draws the axes of the world/object coordinate system w.r.t. the camera frame.
OX is drawn in red, OY in green and OZ in blue.
public static void filterHomographyDecompByVisibleRefpoints(List<Mat> rotations, List<Mat> normals, Mat beforePoints, Mat afterPoints, Mat possibleSolutions, Mat pointsMask)
rotations - Vector of rotation matrices.
normals - Vector of plane normal matrices.
beforePoints - Vector of (rectified) visible reference points before the homography is applied.
afterPoints - Vector of (rectified) visible reference points after the homography is applied.
possibleSolutions - Vector of int indices representing the viable solution set after filtering.
pointsMask - Optional Mat/Vector of 8u type representing the mask for the inliers as given by the findHomography function.
This function is intended to filter the output of the decomposeHomographyMat based on additional
information as described in CITE: Malis . The summary of the method: the decomposeHomographyMat function
returns 2 unique solutions and their "opposites" for a total of 4 solutions. If we have access to the
sets of points visible in the camera frame before and after the homography transformation is applied,
we can determine which are the true potential solutions and which are the opposites by verifying which
homographies are consistent with all visible reference points being in front of the camera. The inputs
are left unchanged; the filtered solution set is returned as indices into the existing one.
public static void filterHomographyDecompByVisibleRefpoints(List<Mat> rotations, List<Mat> normals, Mat beforePoints, Mat afterPoints, Mat possibleSolutions)
rotations - Vector of rotation matrices.
normals - Vector of plane normal matrices.
beforePoints - Vector of (rectified) visible reference points before the homography is applied.
afterPoints - Vector of (rectified) visible reference points after the homography is applied.
possibleSolutions - Vector of int indices representing the viable solution set after filtering.
This function is intended to filter the output of the decomposeHomographyMat based on additional
information as described in CITE: Malis . The summary of the method: the decomposeHomographyMat function
returns 2 unique solutions and their "opposites" for a total of 4 solutions. If we have access to the
sets of points visible in the camera frame before and after the homography transformation is applied,
we can determine which are the true potential solutions and which are the opposites by verifying which
homographies are consistent with all visible reference points being in front of the camera. The inputs
are left unchanged; the filtered solution set is returned as indices into the existing one.
public static void filterSpeckles(Mat img, double newVal, int maxSpeckleSize, double maxDiff, Mat buf)
img - The input 16-bit signed disparity image.
newVal - The disparity value used to paint-off the speckles.
maxSpeckleSize - The maximum speckle size to consider it a speckle. Larger blobs are not
affected by the algorithm.
maxDiff - Maximum difference between neighbor disparity pixels to put them into the same
blob. Note that since StereoBM, StereoSGBM and possibly other algorithms return a fixed-point
disparity map, where disparity values are multiplied by 16, this scale factor should be taken into
account when specifying this parameter value.
buf - The optional temporary buffer to avoid memory allocation within the function.
public static void filterSpeckles(Mat img, double newVal, int maxSpeckleSize, double maxDiff)
img - The input 16-bit signed disparity image.
newVal - The disparity value used to paint-off the speckles.
maxSpeckleSize - The maximum speckle size to consider it a speckle. Larger blobs are not
affected by the algorithm.
maxDiff - Maximum difference between neighbor disparity pixels to put them into the same
blob. Note that since StereoBM, StereoSGBM and possibly other algorithms return a fixed-point
disparity map, where disparity values are multiplied by 16, this scale factor should be taken into
account when specifying this parameter value.
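A hedged Java sketch on a StereoSGBM-style fixed-point disparity map; the thresholds are illustrative:
    // assumes: import org.opencv.calib3d.Calib3d; disparity is a CV_16S map scaled by 16
    Calib3d.filterSpeckles(disparity, 0, 200, 2 * 16); // maxDiff of 2 disparities at the x16 scale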
public static void initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat map1, Mat map2)
cameraMatrix - Input camera matrix \(A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
distCoeffs - Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
R - Optional rectification transformation in the object space (3x3 matrix). R1 or R2,
computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation
is assumed. In cvInitUndistortMap, R is assumed to be an identity matrix.
newCameraMatrix - New camera matrix \(A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}\).
size - Undistorted image size.
m1type - Type of the first output map that can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps.
map1 - The first output map.
map2 - The second output map.
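A common Java pattern is to build the maps once and then remap every incoming frame; cameraMatrix, distCoeffs, imageSize and frame are assumed to exist:
    // assumes: import org.opencv.core.*; import org.opencv.calib3d.Calib3d; import org.opencv.imgproc.Imgproc;
    Mat map1 = new Mat(), map2 = new Mat();
    Calib3d.initUndistortRectifyMap(cameraMatrix, distCoeffs, new Mat(), cameraMatrix,
            imageSize, CvType.CV_16SC2, map1, map2);   // empty R: identity rectification
    Mat undistorted = new Mat();
    Imgproc.remap(frame, undistorted, map1, map2, Imgproc.INTER_LINEAR);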
public static void matMulDeriv(Mat A, Mat B, Mat dABdA, Mat dABdB)
A - First multiplied matrix.
B - Second multiplied matrix.
dABdA - First output derivative matrix d(A\*B)/dA of size
\(\texttt{A.rows*B.cols} \times \texttt{A.rows*A.cols}\) .
dABdB - Second output derivative matrix d(A\*B)/dB of size
\(\texttt{A.rows*B.cols} \times \texttt{B.rows*B.cols}\) .
The function computes partial derivatives of the elements of the matrix product \(A*B\) with regard to
the elements of each of the two input matrices. The function is used to compute the Jacobian
matrices in stereoCalibrate but can also be used in any other similar optimization function.
public static void projectPoints(MatOfPoint3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, MatOfDouble distCoeffs, MatOfPoint2f imagePoints, Mat jacobian, double aspectRatio)
objectPoints - Array of object points, 3xN/Nx3 1-channel or 1xN/Nx1 3-channel (or
vector<Point3f> ), where N is the number of points in the view.
rvec - Rotation vector. See Rodrigues for details.
tvec - Translation vector.
cameraMatrix - Camera matrix \(A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
distCoeffs - Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is empty, the zero distortion coefficients are assumed.
imagePoints - Output array of image points, 1xN/Nx1 2-channel, or
vector<Point2f> .
jacobian - Optional output 2Nx(10+<numDistCoeffs>) jacobian matrix of derivatives of image
points with respect to components of the rotation vector, translation vector, focal lengths,
coordinates of the principal point and the distortion coefficients. In the old interface different
components of the jacobian are returned via different output parameters.
aspectRatio - Optional "fixed aspect ratio" parameter. If the parameter is not 0, the
function assumes that the aspect ratio (*fx/fy*) is fixed and correspondingly adjusts the jacobian
matrix.
The function computes projections of 3D points to the image plane given intrinsic and extrinsic
camera parameters. Optionally, the function computes Jacobians - matrices of partial derivatives of
image points coordinates (as functions of all the input parameters) with respect to the particular
parameters, intrinsic and/or extrinsic. The Jacobians are used during the global optimization in
calibrateCamera, solvePnP, and stereoCalibrate . The function itself can also be used to compute a
re-projection error given the current intrinsic and extrinsic parameters.
Note: By setting rvec=tvec=(0,0,0) or by setting cameraMatrix to a 3x3 identity matrix, or by
passing zero distortion coefficients, you can get various useful partial cases of the function. This
means that you can compute the distorted coordinates for a sparse set of points or apply a
perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
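For instance, the re-projection error of an estimated pose can be checked like this; objectPoints, imagePoints, rvec, tvec, cameraMatrix and distCoeffs are assumed to come from a previous solvePnP-style call:
    // assumes: import org.opencv.core.*; import org.opencv.calib3d.Calib3d;
    MatOfPoint2f projected = new MatOfPoint2f();
    Calib3d.projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs, projected);
    // Approximate per-point RMS re-projection error.
    double rms = Core.norm(imagePoints, projected, Core.NORM_L2) / Math.sqrt(projected.total());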
public static void projectPoints(MatOfPoint3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, MatOfDouble distCoeffs, MatOfPoint2f imagePoints, Mat jacobian)
objectPoints - Array of object points, 3xN/Nx3 1-channel or 1xN/Nx1 3-channel (or
vector<Point3f> ), where N is the number of points in the view.
rvec - Rotation vector. See Rodrigues for details.
tvec - Translation vector.
cameraMatrix - Camera matrix \(A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
distCoeffs - Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is empty, the zero distortion coefficients are assumed.
imagePoints - Output array of image points, 1xN/Nx1 2-channel, or
vector<Point2f> .
jacobian - Optional output 2Nx(10+<numDistCoeffs>) jacobian matrix of derivatives of image
points with respect to components of the rotation vector, translation vector, focal lengths,
coordinates of the principal point and the distortion coefficients. In the old interface different
components of the jacobian are returned via different output parameters.
The function computes projections of 3D points to the image plane given intrinsic and extrinsic
camera parameters. Optionally, the function computes Jacobians - matrices of partial derivatives of
image points coordinates (as functions of all the input parameters) with respect to the particular
parameters, intrinsic and/or extrinsic. The Jacobians are used during the global optimization in
calibrateCamera, solvePnP, and stereoCalibrate . The function itself can also be used to compute a
re-projection error given the current intrinsic and extrinsic parameters.
Note: By setting rvec=tvec=(0,0,0) or by setting cameraMatrix to a 3x3 identity matrix, or by
passing zero distortion coefficients, you can get various useful partial cases of the function. This
means that you can compute the distorted coordinates for a sparse set of points or apply a
perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
public static void projectPoints(MatOfPoint3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, MatOfDouble distCoeffs, MatOfPoint2f imagePoints)
objectPoints - Array of object points, 3xN/Nx3 1-channel or 1xN/Nx1 3-channel (or
vector<Point3f> ), where N is the number of points in the view.
rvec - Rotation vector. See Rodrigues for details.
tvec - Translation vector.
cameraMatrix - Camera matrix \(A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
distCoeffs - Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is empty, the zero distortion coefficients are assumed.
imagePoints - Output array of image points, 1xN/Nx1 2-channel, or
vector<Point2f> .
The function computes projections of 3D points to the image plane given intrinsic and extrinsic
camera parameters. Optionally, the function computes Jacobians - matrices of partial derivatives of
image points coordinates (as functions of all the input parameters) with respect to the particular
parameters, intrinsic and/or extrinsic. The Jacobians are used during the global optimization in
calibrateCamera, solvePnP, and stereoCalibrate . The function itself can also be used to compute a
re-projection error given the current intrinsic and extrinsic parameters.
Note: By setting rvec=tvec=(0,0,0) or by setting cameraMatrix to a 3x3 identity matrix, or by
passing zero distortion coefficients, you can get various useful partial cases of the function. This
means that you can compute the distorted coordinates for a sparse set of points or apply a
perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
public static void reprojectImageTo3D(Mat disparity, Mat _3dImage, Mat Q, boolean handleMissingValues, int ddepth)
disparity - Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
floating-point disparity image. If 16-bit signed format is used, the values are assumed to have no
fractional bits.
_3dImage - Output 3-channel floating-point image of the same size as disparity. Each
element of _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity
map.
Q - \(4 \times 4\) perspective transformation matrix that can be obtained with stereoRectify.
handleMissingValues - Indicates, whether the function should handle missing values (i.e.
points where the disparity was not computed). If handleMissingValues=true, then pixels with the
minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
to 3D points with a very large Z value (currently set to 10000).
ddepth - The optional output array depth. If it is -1, the output image will have CV_32F
depth. ddepth can also be set to CV_16S, CV_32S or CV_32F.
The function transforms a single-channel disparity map to a 3-channel image representing a 3D
surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
computes:
\(\begin{array}{l} [X \; Y \; Z \; W]^T = \texttt{Q} *[x \; y \; \texttt{disparity} (x,y) \; 1]^T \\ \texttt{\_3dImage} (x,y) = (X/W, \; Y/W, \; Z/W) \end{array}\)
The matrix Q can be an arbitrary \(4 \times 4\) matrix (for example, the one computed by
stereoRectify). To reproject a sparse set of points {(x,y,d),...} to 3D space, use
perspectiveTransform.
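A minimal Java sketch, assuming disparity and the Q matrix from stereoRectify are available:
    // assumes: import org.opencv.core.*; import org.opencv.calib3d.Calib3d;
    Mat xyz = new Mat();
    Calib3d.reprojectImageTo3D(disparity, xyz, Q, true);
    double[] point = xyz.get(120, 160);   // (X, Y, Z) of the pixel at row 120, column 160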
public static void reprojectImageTo3D(Mat disparity, Mat _3dImage, Mat Q, boolean handleMissingValues)
disparity - Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
floating-point disparity image. If 16-bit signed format is used, the values are assumed to have no
fractional bits.
_3dImage - Output 3-channel floating-point image of the same size as disparity. Each
element of _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity
map.
Q - \(4 \times 4\) perspective transformation matrix that can be obtained with stereoRectify.
handleMissingValues - Indicates, whether the function should handle missing values (i.e.
points where the disparity was not computed). If handleMissingValues=true, then pixels with the
minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
to 3D points with a very large Z value (currently set to 10000).
The function transforms a single-channel disparity map to a 3-channel image representing a 3D
surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
computes:
\(\begin{array}{l} [X \; Y \; Z \; W]^T = \texttt{Q} *[x \; y \; \texttt{disparity} (x,y) \; 1]^T \\ \texttt{\_3dImage} (x,y) = (X/W, \; Y/W, \; Z/W) \end{array}\)
The matrix Q can be an arbitrary \(4 \times 4\) matrix (for example, the one computed by
stereoRectify). To reproject a sparse set of points {(x,y,d),...} to 3D space, use
perspectiveTransform.
public static void reprojectImageTo3D(Mat disparity, Mat _3dImage, Mat Q)
disparity - Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
floating-point disparity image. If 16-bit signed format is used, the values are assumed to have no
fractional bits.
_3dImage - Output 3-channel floating-point image of the same size as disparity. Each
element of _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity
map.
Q - \(4 \times 4\) perspective transformation matrix that can be obtained with stereoRectify.
The function transforms a single-channel disparity map to a 3-channel image representing a 3D
surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
computes:
\(\begin{array}{l} [X \; Y \; Z \; W]^T = \texttt{Q} *[x \; y \; \texttt{disparity} (x,y) \; 1]^T \\ \texttt{\_3dImage} (x,y) = (X/W, \; Y/W, \; Z/W) \end{array}\)
The matrix Q can be an arbitrary \(4 \times 4\) matrix (for example, the one computed by
stereoRectify). To reproject a sparse set of points {(x,y,d),...} to 3D space, use
perspectiveTransform.
public static void solvePnPRefineLM(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, TermCriteria criteria)
objectPoints - Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
where N is the number of points. vector<Point3f> can also be passed here.
imagePoints - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.
cameraMatrix - Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .
distCoeffs - Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
rvec - Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
tvec - Input/Output translation vector. Input values are used as an initial solution.
criteria - Criteria when to stop the Levenberg-Marquardt iterative algorithm.
The function refines the object pose given at least 3 object points, their corresponding image
projections, an initial solution for the rotation and translation vector,
as well as the camera matrix and the distortion coefficients.
The function minimizes the projection error with respect to the rotation and the translation vectors, according
to a Levenberg-Marquardt iterative minimization CITE: Madsen04 CITE: Eade13 process.
public static void solvePnPRefineLM(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec)
objectPoints - Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
where N is the number of points. vector<Point3f> can also be passed here.
imagePoints - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.
cameraMatrix - Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .
distCoeffs - Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
rvec - Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
tvec - Input/Output translation vector. Input values are used as an initial solution.
The function refines the object pose given at least 3 object points, their corresponding image
projections, an initial solution for the rotation and translation vector,
as well as the camera matrix and the distortion coefficients.
The function minimizes the projection error with respect to the rotation and the translation vectors, according
to a Levenberg-Marquardt iterative minimization CITE: Madsen04 CITE: Eade13 process.
public static void solvePnPRefineVVS(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, TermCriteria criteria, double VVSlambda)
objectPoints - Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
where N is the number of points. vector<Point3f> can also be passed here.
imagePoints - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.
cameraMatrix - Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .
distCoeffs - Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
rvec - Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
tvec - Input/Output translation vector. Input values are used as an initial solution.
criteria - Criteria when to stop the Levenberg-Marquardt iterative algorithm.
VVSlambda - Gain for the virtual visual servoing control law, equivalent to the \(\alpha\)
gain in the Damped Gauss-Newton formulation.
The function refines the object pose given at least 3 object points, their corresponding image
projections, an initial solution for the rotation and translation vector,
as well as the camera matrix and the distortion coefficients.
The function minimizes the projection error with respect to the rotation and the translation vectors, using a
virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.
public static void solvePnPRefineVVS(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, TermCriteria criteria)
objectPoints
- Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
where N is the number of points. vector<Point3f> can also be passed here.imagePoints
- Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.cameraMatrix
- Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.rvec
- Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system. Input values are used as an initial solution.tvec
- Input/Output translation vector. Input values are used as an initial solution.criteria
- Criteria when to stop the Levenberg-Marquardt iterative algorithm.
The function refines the object pose given at least 3 object points, their corresponding image
projections, an initial solution for the rotation and translation vector,
as well as the camera matrix and the distortion coefficients.
The function minimizes the projection error with respect to the rotation and the translation vectors, using a
virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.
public static void solvePnPRefineVVS(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec)
objectPoints
- Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
where N is the number of points. vector<Point3f> can also be passed here.imagePoints
- Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector<Point2f> can also be passed here.cameraMatrix
- Input camera matrix \(A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\) .distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.rvec
- Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system. Input values are used as an initial solution.tvec
- Input/Output translation vector. Input values are used as an initial solution.
The function refines the object pose given at least 3 object points, their corresponding image
projections, an initial solution for the rotation and translation vector,
as well as the camera matrix and the distortion coefficients.
The function minimizes the projection error with respect to the rotation and the translation vectors, using a
virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.
public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, double alpha, Size newImageSize, Rect validPixROI1, Rect validPixROI2)
cameraMatrix1
- First camera matrix.distCoeffs1
- First camera distortion parameters.cameraMatrix2
- Second camera matrix.distCoeffs2
- Second camera distortion parameters.imageSize
- Size of the image used for stereo calibration.R
- Rotation matrix between the coordinate systems of the first and the second cameras.T
- Translation vector between coordinate systems of the cameras.R1
- Output 3x3 rectification transform (rotation matrix) for the first camera.R2
- Output 3x3 rectification transform (rotation matrix) for the second camera.P1
- Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
camera.P2
- Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
camera.Q
- Output \(4 \times 4\) disparity-to-depth mapping matrix (see reprojectImageTo3D ).flags
- Operation flags that may be zero or CALIB_ZERO_DISPARITY . If the flag is set,
the function makes the principal points of each camera have the same pixel coordinates in the
rectified views. And if the flag is not set, the function may still shift the images in the
horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
useful image area.alpha
- Free scaling parameter. If it is -1 or absent, the function performs the default
scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
images are zoomed and shifted so that only valid pixels are visible (no black areas after
rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
pixels from the original images from the cameras are retained in the rectified images (no source
image pixels are lost). Obviously, any intermediate value yields an intermediate result between
those two extreme cases.newImageSize
- New image resolution after rectification. The same size should be passed to
initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
is passed (default), it is set to the original imageSize . Setting it to larger value can help you
preserve details in the original image, especially when there is a big radial distortion.validPixROI1
- Optional output rectangles inside the rectified images where all the pixels
are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
(see the picture below).validPixROI2
- Optional output rectangles inside the rectified images where all the pixels
are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
(see the picture below).
The function computes the rotation matrices for each camera that (virtually) make both camera image
planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
the dense stereo correspondence problem. The function takes the matrices computed by stereoCalibrate
as input. As output, it provides two rotation matrices and also two projection matrices in the new
coordinates. The function distinguishes two cases: horizontal stereo, where the two cameras are displaced mainly along the x axis, and vertical stereo, where they are displaced mainly along the y axis.
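As an illustrative sketch only (not generated reference text), a typical call might look as follows in Java; the intrinsics, distortion coefficients, and the 6 cm horizontal baseline below are placeholder values standing in for real stereoCalibrate output.
import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;

public class StereoRectifyExample {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME); // assumes the OpenCV Java bindings are on the path

        // Placeholder intrinsics; in practice these come from calibrateCamera/stereoCalibrate.
        Mat cameraMatrix1 = Mat.eye(3, 3, CvType.CV_64F);
        cameraMatrix1.put(0, 0, 800, 0, 320, 0, 800, 240, 0, 0, 1);
        Mat cameraMatrix2 = cameraMatrix1.clone();
        Mat distCoeffs1 = Mat.zeros(5, 1, CvType.CV_64F);
        Mat distCoeffs2 = Mat.zeros(5, 1, CvType.CV_64F);

        // Placeholder extrinsics: identity rotation, 6 cm baseline along x.
        Mat R = Mat.eye(3, 3, CvType.CV_64F);
        Mat T = Mat.zeros(3, 1, CvType.CV_64F);
        T.put(0, 0, -0.06);

        Size imageSize = new Size(640, 480);
        Mat R1 = new Mat(), R2 = new Mat(), P1 = new Mat(), P2 = new Mat(), Q = new Mat();
        Rect roi1 = new Rect(), roi2 = new Rect();

        // alpha = 0 crops to valid pixels only; CALIB_ZERO_DISPARITY aligns the principal points.
        Calib3d.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
                imageSize, R, T, R1, R2, P1, P2, Q,
                Calib3d.CALIB_ZERO_DISPARITY, 0, imageSize, roi1, roi2);

        System.out.println("P1 = " + P1.dump());
    }
}
The resulting R1/R2 and P1/P2 would then typically be passed to initUndistortRectifyMap for each camera.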
public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, double alpha, Size newImageSize, Rect validPixROI1)
cameraMatrix1
- First camera matrix.distCoeffs1
- First camera distortion parameters.cameraMatrix2
- Second camera matrix.distCoeffs2
- Second camera distortion parameters.imageSize
- Size of the image used for stereo calibration.R
- Rotation matrix between the coordinate systems of the first and the second cameras.T
- Translation vector between coordinate systems of the cameras.R1
- Output 3x3 rectification transform (rotation matrix) for the first camera.R2
- Output 3x3 rectification transform (rotation matrix) for the second camera.P1
- Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
camera.P2
- Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
camera.Q
- Output \(4 \times 4\) disparity-to-depth mapping matrix (see reprojectImageTo3D ).flags
- Operation flags that may be zero or CALIB_ZERO_DISPARITY . If the flag is set,
the function makes the principal points of each camera have the same pixel coordinates in the
rectified views. And if the flag is not set, the function may still shift the images in the
horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
useful image area.alpha
- Free scaling parameter. If it is -1 or absent, the function performs the default
scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
images are zoomed and shifted so that only valid pixels are visible (no black areas after
rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
pixels from the original images from the cameras are retained in the rectified images (no source
image pixels are lost). Obviously, any intermediate value yields an intermediate result between
those two extreme cases.newImageSize
- New image resolution after rectification. The same size should be passed to
initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
is passed (default), it is set to the original imageSize . Setting it to larger value can help you
preserve details in the original image, especially when there is a big radial distortion.validPixROI1
- Optional output rectangles inside the rectified images where all the pixels
are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
(see the picture below).
The function computes the rotation matrices for each camera that (virtually) make both camera image
planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
the dense stereo correspondence problem. The function takes the matrices computed by stereoCalibrate
as input. As output, it provides two rotation matrices and also two projection matrices in the new
coordinates. The function distinguishes two cases: horizontal stereo, where the two cameras are displaced mainly along the x axis, and vertical stereo, where they are displaced mainly along the y axis.
public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, double alpha, Size newImageSize)
cameraMatrix1
- First camera matrix.distCoeffs1
- First camera distortion parameters.cameraMatrix2
- Second camera matrix.distCoeffs2
- Second camera distortion parameters.imageSize
- Size of the image used for stereo calibration.R
- Rotation matrix between the coordinate systems of the first and the second cameras.T
- Translation vector between coordinate systems of the cameras.R1
- Output 3x3 rectification transform (rotation matrix) for the first camera.R2
- Output 3x3 rectification transform (rotation matrix) for the second camera.P1
- Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
camera.P2
- Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
camera.Q
- Output \(4 \times 4\) disparity-to-depth mapping matrix (see reprojectImageTo3D ).flags
- Operation flags that may be zero or CALIB_ZERO_DISPARITY . If the flag is set,
the function makes the principal points of each camera have the same pixel coordinates in the
rectified views. And if the flag is not set, the function may still shift the images in the
horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
useful image area.alpha
- Free scaling parameter. If it is -1 or absent, the function performs the default
scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
images are zoomed and shifted so that only valid pixels are visible (no black areas after
rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
pixels from the original images from the cameras are retained in the rectified images (no source
image pixels are lost). Obviously, any intermediate value yields an intermediate result between
those two extreme cases.newImageSize
- New image resolution after rectification. The same size should be passed to
initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
is passed (default), it is set to the original imageSize . Setting it to larger value can help you
preserve details in the original image, especially when there is a big radial distortion.
The function computes the rotation matrices for each camera that (virtually) make both camera image
planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
the dense stereo correspondence problem. The function takes the matrices computed by stereoCalibrate
as input. As output, it provides two rotation matrices and also two projection matrices in the new
coordinates. The function distinguishes two cases: horizontal stereo, where the two cameras are displaced mainly along the x axis, and vertical stereo, where they are displaced mainly along the y axis.
public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, double alpha)
cameraMatrix1
- First camera matrix.distCoeffs1
- First camera distortion parameters.cameraMatrix2
- Second camera matrix.distCoeffs2
- Second camera distortion parameters.imageSize
- Size of the image used for stereo calibration.R
- Rotation matrix between the coordinate systems of the first and the second cameras.T
- Translation vector between coordinate systems of the cameras.R1
- Output 3x3 rectification transform (rotation matrix) for the first camera.R2
- Output 3x3 rectification transform (rotation matrix) for the second camera.P1
- Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
camera.P2
- Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
camera.Q
- Output \(4 \times 4\) disparity-to-depth mapping matrix (see reprojectImageTo3D ).flags
- Operation flags that may be zero or CALIB_ZERO_DISPARITY . If the flag is set,
the function makes the principal points of each camera have the same pixel coordinates in the
rectified views. And if the flag is not set, the function may still shift the images in the
horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
useful image area.alpha
- Free scaling parameter. If it is -1 or absent, the function performs the default
scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
images are zoomed and shifted so that only valid pixels are visible (no black areas after
rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
pixels from the original images from the cameras are retained in the rectified images (no source
image pixels are lost). Obviously, any intermediate value yields an intermediate result between
those two extreme cases.
The function computes the rotation matrices for each camera that (virtually) make both camera image
planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
the dense stereo correspondence problem. The function takes the matrices computed by stereoCalibrate
as input. As output, it provides two rotation matrices and also two projection matrices in the new
coordinates. The function distinguishes two cases: horizontal stereo, where the two cameras are displaced mainly along the x axis, and vertical stereo, where they are displaced mainly along the y axis.
public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags)
cameraMatrix1
- First camera matrix.distCoeffs1
- First camera distortion parameters.cameraMatrix2
- Second camera matrix.distCoeffs2
- Second camera distortion parameters.imageSize
- Size of the image used for stereo calibration.R
- Rotation matrix between the coordinate systems of the first and the second cameras.T
- Translation vector between coordinate systems of the cameras.R1
- Output 3x3 rectification transform (rotation matrix) for the first camera.R2
- Output 3x3 rectification transform (rotation matrix) for the second camera.P1
- Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
camera.P2
- Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
camera.Q
- Output \(4 \times 4\) disparity-to-depth mapping matrix (see reprojectImageTo3D ).flags
- Operation flags that may be zero or CALIB_ZERO_DISPARITY . If the flag is set,
the function makes the principal points of each camera have the same pixel coordinates in the
rectified views. And if the flag is not set, the function may still shift the images in the
horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
useful image area.
The function computes the rotation matrices for each camera that (virtually) make both camera image
planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
the dense stereo correspondence problem. The function takes the matrices computed by stereoCalibrate
as input. As output, it provides two rotation matrices and also two projection matrices in the new
coordinates. The function distinguishes two cases: horizontal stereo, where the two cameras are displaced mainly along the x axis, and vertical stereo, where they are displaced mainly along the y axis.
public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q)
cameraMatrix1
- First camera matrix.distCoeffs1
- First camera distortion parameters.cameraMatrix2
- Second camera matrix.distCoeffs2
- Second camera distortion parameters.imageSize
- Size of the image used for stereo calibration.R
- Rotation matrix between the coordinate systems of the first and the second cameras.T
- Translation vector between coordinate systems of the cameras.R1
- Output 3x3 rectification transform (rotation matrix) for the first camera.R2
- Output 3x3 rectification transform (rotation matrix) for the second camera.P1
- Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
camera.P2
- Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
camera.Q
- Output \(4 \times 4\) disparity-to-depth mapping matrix (see reprojectImageTo3D ).
The function computes the rotation matrices for each camera that (virtually) make both camera image
planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
the dense stereo correspondence problem. The function takes the matrices computed by stereoCalibrate
as input. As output, it provides two rotation matrices and also two projection matrices in the new
coordinates. The function distinguishes two cases: horizontal stereo, where the two cameras are displaced mainly along the x axis, and vertical stereo, where they are displaced mainly along the y axis.
public static void triangulatePoints(Mat projMatr1, Mat projMatr2, Mat projPoints1, Mat projPoints2, Mat points4D)
projMatr1
- 3x4 projection matrix of the first camera.projMatr2
- 3x4 projection matrix of the second camera.projPoints1
- 2xN array of feature points in the first image. In the C++ version it can
also be a vector of feature points or a two-channel matrix of size 1xN or Nx1.projPoints2
- 2xN array of corresponding points in the second image. In the C++ version
it can also be a vector of feature points or a two-channel matrix of size 1xN or Nx1.points4D
- 4xN array of reconstructed points in homogeneous coordinates.
The function reconstructs 3-dimensional points (in homogeneous coordinates) by using their
observations with a stereo camera. Projection matrices can be obtained from stereoRectify.
Note:
Keep in mind that all input data should be of float type in order for this function to work.
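As an illustrative sketch only: the projection matrices and pixel coordinates below are placeholder values, and every input is CV_32F as the note above requires.
import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;

public class TriangulateExample {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // Placeholder 3x4 projection matrices, e.g. P1/P2 as produced by stereoRectify.
        Mat projMatr1 = new Mat(3, 4, CvType.CV_32F);
        projMatr1.put(0, 0,
                800, 0, 320, 0,
                0, 800, 240, 0,
                0, 0, 1, 0);
        Mat projMatr2 = new Mat(3, 4, CvType.CV_32F);
        projMatr2.put(0, 0,
                800, 0, 320, -48,   // -48 = -fx * baseline for a horizontal stereo pair
                0, 800, 240, 0,
                0, 0, 1, 0);

        // One matching pixel per image, in the 2xN layout (N = 1 here).
        Mat projPoints1 = new Mat(2, 1, CvType.CV_32F);
        projPoints1.put(0, 0, 300, 250);
        Mat projPoints2 = new Mat(2, 1, CvType.CV_32F);
        projPoints2.put(0, 0, 280, 250);

        Mat points4D = new Mat();
        Calib3d.triangulatePoints(projMatr1, projMatr2, projPoints1, projPoints2, points4D);

        // Convert from homogeneous coordinates: divide x, y, z by w.
        double w = points4D.get(3, 0)[0];
        System.out.printf("X=%.3f Y=%.3f Z=%.3f%n",
                points4D.get(0, 0)[0] / w,
                points4D.get(1, 0)[0] / w,
                points4D.get(2, 0)[0] / w);
    }
}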
SEE:
reprojectImageTo3D
public static void undistort(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix)
src
- Input (distorted) image.dst
- Output (corrected) image that has the same size and type as src .cameraMatrix
- Input camera matrix \(A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.newCameraMatrix
- Camera matrix of the distorted image. By default, it is the same as
cameraMatrix but you may additionally scale and shift the result by using a different matrix.
public static void undistort(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs)
src
- Input (distorted) image.dst
- Output (corrected) image that has the same size and type as src .cameraMatrix
- Input camera matrix \(A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
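As an illustrative sketch of this short overload: the file name is hypothetical and the intrinsics/distortion values are placeholders, not real calibrateCamera output.
import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;
import org.opencv.imgcodecs.Imgcodecs;

public class UndistortExample {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        Mat src = Imgcodecs.imread("distorted.png"); // hypothetical input path

        // Placeholder intrinsics; real values come from calibrateCamera.
        Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);
        cameraMatrix.put(0, 0, 800, 0, src.cols() / 2.0, 0, 800, src.rows() / 2.0, 0, 0, 1);
        MatOfDouble distCoeffs = new MatOfDouble(-0.28, 0.07, 0.0, 0.0, 0.0); // (k1, k2, p1, p2, k3)

        Mat dst = new Mat();
        Calib3d.undistort(src, dst, cameraMatrix, distCoeffs);
        Imgcodecs.imwrite("undistorted.png", dst);
    }
}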
public static void undistortPointsIter(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P, TermCriteria criteria)
src
- automatically generateddst
- automatically generatedcameraMatrix
- automatically generateddistCoeffs
- automatically generatedR
- automatically generatedP
- automatically generatedcriteria
- automatically generated
public static void undistortPoints(MatOfPoint2f src, MatOfPoint2f dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P)
src
- Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
vector<Point2f> ).dst
- Output ideal point coordinates (1xN/Nx1 2-channel or vector<Point2f> ) after undistortion and reverse perspective
transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.cameraMatrix
- Camera matrix \(\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.R
- Rectification transformation in the object space (3x3 matrix). R1 or R2 computed by
#stereoRectify can be passed here. If the matrix is empty, the identity transformation is used.P
- New camera matrix (3x3) or new projection matrix (3x4) \(\begin{bmatrix} {f'}_x & 0 & {c'}_x & t_x \\ 0 & {f'}_y & {c'}_y & t_y \\ 0 & 0 & 1 & t_z \end{bmatrix}\). P1 or P2 computed by
#stereoRectify can be passed here. If the matrix is empty, the identity new camera matrix is used.
public static void undistortPoints(MatOfPoint2f src, MatOfPoint2f dst, Mat cameraMatrix, Mat distCoeffs, Mat R)
src
- Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
vector<Point2f> ).dst
- Output ideal point coordinates (1xN/Nx1 2-channel or vector<Point2f> ) after undistortion and reverse perspective
transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.cameraMatrix
- Camera matrix \(\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.R
- Rectification transformation in the object space (3x3 matrix). R1 or R2 computed by
#stereoRectify can be passed here. If the matrix is empty, the identity transformation is used.
public static void undistortPoints(MatOfPoint2f src, MatOfPoint2f dst, Mat cameraMatrix, Mat distCoeffs)
src
- Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
vector<Point2f> ).dst
- Output ideal point coordinates (1xN/Nx1 2-channel or vector<Point2f> ) after undistortion and reverse perspective
transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.cameraMatrix
- Camera matrix \(\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .distCoeffs
- Input vector of distortion coefficients
\((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
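As an illustrative sketch of this short overload (placeholder intrinsics): without R and P the output is in normalized coordinates, as described above.
import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;

public class UndistortPointsExample {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // Placeholder intrinsics and one observed pixel.
        Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);
        cameraMatrix.put(0, 0, 800, 0, 320, 0, 800, 240, 0, 0, 1);
        MatOfDouble distCoeffs = new MatOfDouble(-0.28, 0.07, 0.0, 0.0, 0.0);

        MatOfPoint2f src = new MatOfPoint2f(new Point(400, 300));
        MatOfPoint2f dst = new MatOfPoint2f();

        // With no R and P, dst holds normalized coordinates (x', y'),
        // i.e. [x', y', 1] is a ray in the camera coordinate system.
        Calib3d.undistortPoints(src, dst, cameraMatrix, distCoeffs);

        System.out.println(dst.toList());
    }
}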
public static void validateDisparity(Mat disparity, Mat cost, int minDisparity, int numberOfDisparities, int disp12MaxDisp)
public static void validateDisparity(Mat disparity, Mat cost, int minDisparity, int numberOfDisparities)
public static void fisheye_distortPoints(Mat undistorted, Mat distorted, Mat K, Mat D, double alpha)
undistorted
- Array of object points, 1xN/Nx1 2-channel (or vector<Point2f> ), where N is
the number of points in the view.K
- Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\).D
- Input vector of distortion coefficients \((k_1, k_2, k_3, k_4)\).alpha
- The skew coefficient.distorted
- Output array of image points, 1xN/Nx1 2-channel, or vector<Point2f> .
Note that the function assumes the camera matrix of the undistorted points to be identity.
This means if you want to transform back points undistorted with undistortPoints() you have to
multiply them with \(P^{-1}\).
public static void fisheye_distortPoints(Mat undistorted, Mat distorted, Mat K, Mat D)
undistorted
- Array of object points, 1xN/Nx1 2-channel (or vector<Point2f> ), where N is
the number of points in the view.K
- Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\).D
- Input vector of distortion coefficients \((k_1, k_2, k_3, k_4)\).distorted
- Output array of image points, 1xN/Nx1 2-channel, or vector<Point2f> .
Note that the function assumes the camera matrix of the undistorted points to be identity.
This means if you want to transform back points undistorted with undistortPoints() you have to
multiply them with \(P^{-1}\).
public static void fisheye_estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat P, double balance, Size new_size, double fov_scale)
K
- Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\).image_size
- Size of the imageD
- Input vector of distortion coefficients \((k_1, k_2, k_3, k_4)\).R
- Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
1-channel or 1x1 3-channelP
- New camera matrix (3x3) or new projection matrix (3x4)balance
- Sets the new focal length in range between the min focal length and the max focal
length. Balance is in range of [0, 1].new_size
- the new sizefov_scale
- Divisor for new focal length.
public static void fisheye_estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat P, double balance, Size new_size)
K
- Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\).image_size
- Size of the imageD
- Input vector of distortion coefficients \((k_1, k_2, k_3, k_4)\).R
- Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
1-channel or 1x1 3-channelP
- New camera matrix (3x3) or new projection matrix (3x4)balance
- Sets the new focal length in range between the min focal length and the max focal
length. Balance is in range of [0, 1].new_size
- the new size
public static void fisheye_estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat P, double balance)
K
- Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\).image_size
- Size of the imageD
- Input vector of distortion coefficients \((k_1, k_2, k_3, k_4)\).R
- Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
1-channel or 1x1 3-channelP
- New camera matrix (3x3) or new projection matrix (3x4)balance
- Sets the new focal length in range between the min focal length and the max focal
length. Balance is in range of [0, 1].
public static void fisheye_estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat P)
K
- Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\).image_size
- Size of the imageD
- Input vector of distortion coefficients \((k_1, k_2, k_3, k_4)\).R
- Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
1-channel or 1x1 3-channelP
- New camera matrix (3x3) or new projection matrix (3x4)
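As an illustrative sketch of the balance overload; K and D below are placeholders for real fisheye calibration output.
import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;

public class FisheyeNewCameraMatrixExample {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // Placeholder fisheye intrinsics.
        Mat K = Mat.eye(3, 3, CvType.CV_64F);
        K.put(0, 0, 400, 0, 320, 0, 400, 240, 0, 0, 1);
        Mat D = Mat.zeros(4, 1, CvType.CV_64F); // (k1, k2, k3, k4)

        Size imageSize = new Size(640, 480);
        Mat R = Mat.eye(3, 3, CvType.CV_64F); // no rectification rotation
        Mat P = new Mat();                    // output: estimated new camera matrix

        // balance = 0 keeps the minimum focal length (tight crop);
        // balance = 1 keeps the maximum (all source pixels stay visible).
        Calib3d.fisheye_estimateNewCameraMatrixForUndistortRectify(K, D, imageSize, R, P, 0.5);

        System.out.println("new camera matrix = " + P.dump());
    }
}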
public static void fisheye_initUndistortRectifyMap(Mat K, Mat D, Mat R, Mat P, Size size, int m1type, Mat map1, Mat map2)
K
- Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\).D
- Input vector of distortion coefficients \((k_1, k_2, k_3, k_4)\).R
- Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
1-channel or 1x1 3-channelP
- New camera matrix (3x3) or new projection matrix (3x4)size
- Undistorted image size.m1type
- Type of the first output map that can be CV_32FC1 or CV_16SC2 . See convertMaps()
for details.map1
- The first output map.map2
- The second output map.
public static void fisheye_projectPoints(Mat objectPoints, Mat imagePoints, Mat rvec, Mat tvec, Mat K, Mat D, double alpha, Mat jacobian)
public static void fisheye_projectPoints(Mat objectPoints, Mat imagePoints, Mat rvec, Mat tvec, Mat K, Mat D, double alpha)
public static void fisheye_projectPoints(Mat objectPoints, Mat imagePoints, Mat rvec, Mat tvec, Mat K, Mat D)
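These overloads carry no generated parameter documentation. As an illustrative sketch under that caveat, the shortest form might be exercised as follows; all numeric values are placeholders.
import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;

public class FisheyeProjectPointsExample {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // One object point roughly one metre in front of the camera (placeholder).
        MatOfPoint3f objectPoints = new MatOfPoint3f(new Point3(0.1, -0.05, 1.0));
        Mat imagePoints = new Mat();

        Mat rvec = Mat.zeros(3, 1, CvType.CV_64F); // no rotation
        Mat tvec = Mat.zeros(3, 1, CvType.CV_64F); // no translation

        // Placeholder fisheye intrinsics.
        Mat K = Mat.eye(3, 3, CvType.CV_64F);
        K.put(0, 0, 400, 0, 320, 0, 400, 240, 0, 0, 1);
        Mat D = Mat.zeros(4, 1, CvType.CV_64F); // (k1, k2, k3, k4)

        Calib3d.fisheye_projectPoints(objectPoints, imagePoints, rvec, tvec, K, D);
        System.out.println(imagePoints.dump());
    }
}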
public static void fisheye_stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, Size newImageSize, double balance, double fov_scale)
K1
- First camera matrix.D1
- First camera distortion parameters.K2
- Second camera matrix.D2
- Second camera distortion parameters.imageSize
- Size of the image used for stereo calibration.R
- Rotation matrix between the coordinate systems of the first and the second
cameras.tvec
- Translation vector between coordinate systems of the cameras.R1
- Output 3x3 rectification transform (rotation matrix) for the first camera.R2
- Output 3x3 rectification transform (rotation matrix) for the second camera.P1
- Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
camera.P2
- Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
camera.Q
- Output \(4 \times 4\) disparity-to-depth mapping matrix (see reprojectImageTo3D ).flags
- Operation flags that may be zero or CALIB_ZERO_DISPARITY . If the flag is set,
the function makes the principal points of each camera have the same pixel coordinates in the
rectified views. And if the flag is not set, the function may still shift the images in the
horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
useful image area.newImageSize
- New image resolution after rectification. The same size should be passed to
initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
is passed (default), it is set to the original imageSize . Setting it to larger value can help you
preserve details in the original image, especially when there is a big radial distortion.balance
- Sets the new focal length in range between the min focal length and the max focal
length. Balance is in range of [0, 1].fov_scale
- Divisor for new focal length.
public static void fisheye_stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, Size newImageSize, double balance)
K1
- First camera matrix.D1
- First camera distortion parameters.K2
- Second camera matrix.D2
- Second camera distortion parameters.imageSize
- Size of the image used for stereo calibration.R
- Rotation matrix between the coordinate systems of the first and the second
cameras.tvec
- Translation vector between coordinate systems of the cameras.R1
- Output 3x3 rectification transform (rotation matrix) for the first camera.R2
- Output 3x3 rectification transform (rotation matrix) for the second camera.P1
- Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
camera.P2
- Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
camera.Q
- Output \(4 \times 4\) disparity-to-depth mapping matrix (see reprojectImageTo3D ).flags
- Operation flags that may be zero or CALIB_ZERO_DISPARITY . If the flag is set,
the function makes the principal points of each camera have the same pixel coordinates in the
rectified views. And if the flag is not set, the function may still shift the images in the
horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
useful image area.newImageSize
- New image resolution after rectification. The same size should be passed to
initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
is passed (default), it is set to the original imageSize . Setting it to larger value can help you
preserve details in the original image, especially when there is a big radial distortion.balance
- Sets the new focal length in range between the min focal length and the max focal
length. Balance is in range of [0, 1].
public static void fisheye_stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, Size newImageSize)
K1
- First camera matrix.D1
- First camera distortion parameters.K2
- Second camera matrix.D2
- Second camera distortion parameters.imageSize
- Size of the image used for stereo calibration.R
- Rotation matrix between the coordinate systems of the first and the second
cameras.tvec
- Translation vector between coordinate systems of the cameras.R1
- Output 3x3 rectification transform (rotation matrix) for the first camera.R2
- Output 3x3 rectification transform (rotation matrix) for the second camera.P1
- Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
camera.P2
- Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
camera.Q
- Output \(4 \times 4\) disparity-to-depth mapping matrix (see reprojectImageTo3D ).flags
- Operation flags that may be zero or CALIB_ZERO_DISPARITY . If the flag is set,
the function makes the principal points of each camera have the same pixel coordinates in the
rectified views. And if the flag is not set, the function may still shift the images in the
horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
useful image area.newImageSize
- New image resolution after rectification. The same size should be passed to
initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
is passed (default), it is set to the original imageSize . Setting it to larger value can help you
preserve details in the original image, especially when there is a big radial distortion.
public static void fisheye_stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags)
K1
- First camera matrix.D1
- First camera distortion parameters.K2
- Second camera matrix.D2
- Second camera distortion parameters.imageSize
- Size of the image used for stereo calibration.R
- Rotation matrix between the coordinate systems of the first and the second
cameras.tvec
- Translation vector between coordinate systems of the cameras.R1
- Output 3x3 rectification transform (rotation matrix) for the first camera.R2
- Output 3x3 rectification transform (rotation matrix) for the second camera.P1
- Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
camera.P2
- Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
camera.Q
- Output \(4 \times 4\) disparity-to-depth mapping matrix (see reprojectImageTo3D ).flags
- Operation flags that may be zero or CALIB_ZERO_DISPARITY . If the flag is set,
the function makes the principal points of each camera have the same pixel coordinates in the
rectified views. And if the flag is not set, the function may still shift the images in the
horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
useful image area.
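As an illustrative sketch of this flags-only overload; the intrinsics and the 6 cm baseline are placeholders, and flags is zero, which the description above permits.
import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;

public class FisheyeStereoRectifyExample {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // Placeholder fisheye intrinsics for both cameras.
        Mat K1 = Mat.eye(3, 3, CvType.CV_64F);
        K1.put(0, 0, 400, 0, 320, 0, 400, 240, 0, 0, 1);
        Mat K2 = K1.clone();
        Mat D1 = Mat.zeros(4, 1, CvType.CV_64F);
        Mat D2 = Mat.zeros(4, 1, CvType.CV_64F);

        // Placeholder extrinsics: identity rotation, 6 cm baseline along x.
        Mat R = Mat.eye(3, 3, CvType.CV_64F);
        Mat tvec = Mat.zeros(3, 1, CvType.CV_64F);
        tvec.put(0, 0, -0.06);

        Size imageSize = new Size(640, 480);
        Mat R1 = new Mat(), R2 = new Mat(), P1 = new Mat(), P2 = new Mat(), Q = new Mat();

        Calib3d.fisheye_stereoRectify(K1, D1, K2, D2, imageSize, R, tvec,
                R1, R2, P1, P2, Q, 0 /* flags: zero, no CALIB_ZERO_DISPARITY */);

        System.out.println("Q = " + Q.dump());
    }
}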
public static void fisheye_undistortImage(Mat distorted, Mat undistorted, Mat K, Mat D, Mat Knew, Size new_size)
distorted
- image with fisheye lens distortion.undistorted
- Output image with compensated fisheye lens distortion.K
- Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\).D
- Input vector of distortion coefficients \((k_1, k_2, k_3, k_4)\).Knew
- Camera matrix of the distorted image. By default, it is the identity matrix but you
may additionally scale and shift the result by using a different matrix.new_size
- the new size
The function transforms an image to compensate for radial and tangential lens distortion.
The function is simply a combination of fisheye::initUndistortRectifyMap (with unity R ) and remap
(with bilinear interpolation). See the former function for details of the transformation being
performed.
public static void fisheye_undistortImage(Mat distorted, Mat undistorted, Mat K, Mat D, Mat Knew)
distorted
- image with fisheye lens distortion.undistorted
- Output image with compensated fisheye lens distortion.K
- Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\).D
- Input vector of distortion coefficients \((k_1, k_2, k_3, k_4)\).Knew
- Camera matrix of the distorted image. By default, it is the identity matrix but you
may additionally scale and shift the result by using a different matrix.
The function transforms an image to compensate for radial and tangential lens distortion.
The function is simply a combination of fisheye::initUndistortRectifyMap (with unity R ) and remap
(with bilinear interpolation). See the former function for details of the transformation being
performed.
public static void fisheye_undistortImage(Mat distorted, Mat undistorted, Mat K, Mat D)
distorted
- image with fisheye lens distortion.undistorted
- Output image with compensated fisheye lens distortion.K
- Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\).D
- Input vector of distortion coefficients \((k_1, k_2, k_3, k_4)\).
The function transforms an image to compensate for radial and tangential lens distortion.
The function is simply a combination of fisheye::initUndistortRectifyMap (with unity R ) and remap
(with bilinear interpolation). See the former function for details of the transformation being
performed.
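As an illustrative sketch: the file name is hypothetical, and K and D are placeholders for real fisheye calibration output. Passing K itself as Knew keeps the original focal length; the identity default usually yields an impractical zoom on real images.
import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;
import org.opencv.imgcodecs.Imgcodecs;

public class FisheyeUndistortImageExample {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        Mat distorted = Imgcodecs.imread("fisheye.png"); // hypothetical input path

        // Placeholder fisheye intrinsics.
        Mat K = Mat.eye(3, 3, CvType.CV_64F);
        K.put(0, 0, 400, 0, distorted.cols() / 2.0, 0, 400, distorted.rows() / 2.0, 0, 0, 1);
        MatOfDouble D = new MatOfDouble(-0.02, 0.01, 0.0, 0.0); // (k1, k2, k3, k4)

        Mat undistorted = new Mat();
        Calib3d.fisheye_undistortImage(distorted, undistorted, K, D, K); // Knew = K

        Imgcodecs.imwrite("fisheye_undistorted.png", undistorted);
    }
}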
public static void fisheye_undistortPoints(Mat distorted, Mat undistorted, Mat K, Mat D, Mat R, Mat P)
distorted
- Array of object points, 1xN/Nx1 2-channel (or vector<Point2f> ), where N is the
number of points in the view.K
- Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\).D
- Input vector of distortion coefficients \((k_1, k_2, k_3, k_4)\).R
- Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
1-channel or 1x1 3-channelP
- New camera matrix (3x3) or new projection matrix (3x4)undistorted
- Output array of image points, 1xN/Nx1 2-channel, or vector<Point2f> .
public static void fisheye_undistortPoints(Mat distorted, Mat undistorted, Mat K, Mat D, Mat R)
distorted
- Array of object points, 1xN/Nx1 2-channel (or vector<Point2f> ), where N is the
number of points in the view.K
- Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\).D
- Input vector of distortion coefficients \((k_1, k_2, k_3, k_4)\).R
- Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
1-channel or 1x1 3-channelundistorted
- Output array of image points, 1xN/Nx1 2-channel, or vector<Point2f> .
public static void fisheye_undistortPoints(Mat distorted, Mat undistorted, Mat K, Mat D)
distorted
- Array of object points, 1xN/Nx1 2-channel (or vector<Point2f> ), where N is the
number of points in the view.K
- Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\).D
- Input vector of distortion coefficients \((k_1, k_2, k_3, k_4)\).undistorted
- Output array of image points, 1xN/Nx1 2-channel, or vector<Point2f> .
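As an illustrative sketch of this shortest overload (placeholder intrinsics): without R and P the output is in normalized coordinates.
import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;

public class FisheyeUndistortPointsExample {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // Placeholder fisheye intrinsics and one observed pixel.
        Mat K = Mat.eye(3, 3, CvType.CV_64F);
        K.put(0, 0, 400, 0, 320, 0, 400, 240, 0, 0, 1);
        Mat D = Mat.zeros(4, 1, CvType.CV_64F); // (k1, k2, k3, k4)

        MatOfPoint2f distorted = new MatOfPoint2f(new Point(420, 260));
        Mat undistorted = new Mat();

        Calib3d.fisheye_undistortPoints(distorted, undistorted, K, D);
        System.out.println(undistorted.dump());
    }
}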