OpenCV  5.0.0-pre
Open Source Computer Vision
Loading...
Searching...
No Matches
Namespaces | Classes | Enumerations | Enumerator | Functions
Stereo Correspondence

Detailed Description

Namespaces

namespace  cv::fisheye
 The methods in this namespace use a so-called fisheye camera model.
 

Classes

class  cv::StereoBM
 Class for computing stereo correspondence using the block matching algorithm, introduced and contributed to OpenCV by K. Konolige. More...
 
class  cv::StereoMatcher
 The base class for stereo correspondence algorithms. More...
 
class  cv::StereoSGBM
 The class implements the modified H. Hirschmuller algorithm [126] that differs from the original one as follows: More...
 

Enumerations

enum  {
  cv::StereoMatcher::DISP_SHIFT = 4 ,
  cv::StereoMatcher::DISP_SCALE = (1 << DISP_SHIFT)
}
 
enum  {
  cv::StereoBM::PREFILTER_NORMALIZED_RESPONSE = 0 ,
  cv::StereoBM::PREFILTER_XSOBEL = 1
}
 
enum  {
  cv::StereoSGBM::MODE_SGBM = 0 ,
  cv::StereoSGBM::MODE_HH = 1 ,
  cv::StereoSGBM::MODE_SGBM_3WAY = 2 ,
  cv::StereoSGBM::MODE_HH4 = 3
}
 

Functions

virtual void cv::StereoMatcher::compute (InputArray left, InputArray right, OutputArray disparity)=0
 Computes disparity map for the specified stereo pair.
 
static Ptr< StereoSGBM > cv::StereoSGBM::create (int minDisparity=0, int numDisparities=16, int blockSize=3, int P1=0, int P2=0, int disp12MaxDiff=0, int preFilterCap=0, int uniquenessRatio=0, int speckleWindowSize=0, int speckleRange=0, int mode=StereoSGBM::MODE_SGBM)
 Creates StereoSGBM object.
 
static Ptr< StereoBM > cv::StereoBM::create (int numDisparities=0, int blockSize=21)
 Creates StereoBM object.
 
void cv::filterSpeckles (InputOutputArray img, double newVal, int maxSpeckleSize, double maxDiff, InputOutputArray buf=noArray())
 Filters off small noise blobs (speckles) in the disparity map.
 
virtual int cv::StereoMatcher::getBlockSize () const =0
 
virtual int cv::StereoMatcher::getDisp12MaxDiff () const =0
 
virtual int cv::StereoMatcher::getMinDisparity () const =0
 
virtual int cv::StereoSGBM::getMode () const =0
 
virtual int cv::StereoMatcher::getNumDisparities () const =0
 
virtual int cv::StereoSGBM::getP1 () const =0
 
virtual int cv::StereoSGBM::getP2 () const =0
 
virtual int cv::StereoBM::getPreFilterCap () const =0
 
virtual int cv::StereoSGBM::getPreFilterCap () const =0
 
virtual int cv::StereoBM::getPreFilterSize () const =0
 
virtual int cv::StereoBM::getPreFilterType () const =0
 
virtual Rect cv::StereoBM::getROI1 () const =0
 
virtual Rect cv::StereoBM::getROI2 () const =0
 
virtual int cv::StereoBM::getSmallerBlockSize () const =0
 
virtual int cv::StereoMatcher::getSpeckleRange () const =0
 
virtual int cv::StereoMatcher::getSpeckleWindowSize () const =0
 
virtual int cv::StereoBM::getTextureThreshold () const =0
 
virtual int cv::StereoBM::getUniquenessRatio () const =0
 
virtual int cv::StereoSGBM::getUniquenessRatio () const =0
 
Rect cv::getValidDisparityROI (Rect roi1, Rect roi2, int minDisparity, int numberOfDisparities, int blockSize)
 computes valid disparity ROI from the valid ROIs of the rectified images (that are returned by stereoRectify)
 
float cv::rectify3Collinear (InputArray _cameraMatrix1, InputArray _distCoeffs1, InputArray _cameraMatrix2, InputArray _distCoeffs2, InputArray _cameraMatrix3, InputArray _distCoeffs3, InputArrayOfArrays _imgpt1, InputArrayOfArrays _imgpt3, Size imageSize, InputArray _Rmat12, InputArray _Tmat12, InputArray _Rmat13, InputArray _Tmat13, OutputArray _Rmat1, OutputArray _Rmat2, OutputArray _Rmat3, OutputArray _Pmat1, OutputArray _Pmat2, OutputArray _Pmat3, OutputArray _Qmat, double alpha, Size newImgSize, Rect *roi1, Rect *roi2, int flags)
 
void cv::reprojectImageTo3D (InputArray disparity, OutputArray _3dImage, InputArray Q, bool handleMissingValues=false, int ddepth=-1)
 Reprojects a disparity image to 3D space.
 
virtual void cv::StereoMatcher::setBlockSize (int blockSize)=0
 
virtual void cv::StereoMatcher::setDisp12MaxDiff (int disp12MaxDiff)=0
 
virtual void cv::StereoMatcher::setMinDisparity (int minDisparity)=0
 
virtual void cv::StereoSGBM::setMode (int mode)=0
 
virtual void cv::StereoMatcher::setNumDisparities (int numDisparities)=0
 
virtual void cv::StereoSGBM::setP1 (int P1)=0
 
virtual void cv::StereoSGBM::setP2 (int P2)=0
 
virtual void cv::StereoBM::setPreFilterCap (int preFilterCap)=0
 
virtual void cv::StereoSGBM::setPreFilterCap (int preFilterCap)=0
 
virtual void cv::StereoBM::setPreFilterSize (int preFilterSize)=0
 
virtual void cv::StereoBM::setPreFilterType (int preFilterType)=0
 
virtual void cv::StereoBM::setROI1 (Rect roi1)=0
 
virtual void cv::StereoBM::setROI2 (Rect roi2)=0
 
virtual void cv::StereoBM::setSmallerBlockSize (int blockSize)=0
 
virtual void cv::StereoMatcher::setSpeckleRange (int speckleRange)=0
 
virtual void cv::StereoMatcher::setSpeckleWindowSize (int speckleWindowSize)=0
 
virtual void cv::StereoBM::setTextureThreshold (int textureThreshold)=0
 
virtual void cv::StereoBM::setUniquenessRatio (int uniquenessRatio)=0
 
virtual void cv::StereoSGBM::setUniquenessRatio (int uniquenessRatio)=0
 
void cv::stereoRectify (InputArray cameraMatrix1, InputArray distCoeffs1, InputArray cameraMatrix2, InputArray distCoeffs2, Size imageSize, InputArray R, InputArray T, OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q, int flags=STEREO_ZERO_DISPARITY, double alpha=-1, Size newImageSize=Size(), Rect *validPixROI1=0, Rect *validPixROI2=0)
 Computes rectification transforms for each head of a calibrated stereo camera.
 
void cv::fisheye::stereoRectify (InputArray K1, InputArray D1, InputArray K2, InputArray D2, const Size &imageSize, InputArray R, InputArray tvec, OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q, int flags, const Size &newImageSize=Size(), double balance=0.0, double fov_scale=1.0)
 Stereo rectification for fisheye camera model.
 
bool cv::stereoRectifyUncalibrated (InputArray points1, InputArray points2, InputArray F, Size imgSize, OutputArray H1, OutputArray H2, double threshold=5)
 Computes a rectification transform for an uncalibrated stereo camera.
 
void cv::validateDisparity (InputOutputArray disparity, InputArray cost, int minDisparity, int numberOfDisparities, int disp12MaxDisp=1)
 validates disparity using the left-right check. The matrix "cost" should be computed by the stereo correspondence algorithm
 

Enumeration Type Documentation

◆ anonymous enum

anonymous enum
Enumerator
DISP_SHIFT 
DISP_SCALE 

◆ anonymous enum

anonymous enum
Enumerator
PREFILTER_NORMALIZED_RESPONSE 
PREFILTER_XSOBEL 

◆ anonymous enum

anonymous enum
Enumerator
MODE_SGBM 
MODE_HH 
MODE_SGBM_3WAY 
MODE_HH4 

Function Documentation

◆ compute()

virtual void cv::StereoMatcher::compute ( InputArray  left,
InputArray  right,
OutputArray  disparity 
)
pure virtual
Python:
cv.StereoMatcher.compute(left, right[, disparity]) -> disparity

#include <opencv2/stereo.hpp>

Computes disparity map for the specified stereo pair.

Parameters
leftLeft 8-bit single-channel image.
rightRight image of the same size and the same type as the left one.
disparityOutput disparity map. It has the same size as the input images. Some algorithms, like StereoBM or StereoSGBM compute 16-bit fixed-point disparity map (where each disparity value has 4 fractional bits), whereas other algorithms output 32-bit floating-point disparity map.

Implemented in cv::cuda::StereoSGM.

◆ create() [1/2]

static Ptr< StereoSGBM > cv::StereoSGBM::create ( int  minDisparity = 0,
int  numDisparities = 16,
int  blockSize = 3,
int  P1 = 0,
int  P2 = 0,
int  disp12MaxDiff = 0,
int  preFilterCap = 0,
int  uniquenessRatio = 0,
int  speckleWindowSize = 0,
int  speckleRange = 0,
int  mode = StereoSGBM::MODE_SGBM 
)
static
Python:
cv.StereoSGBM.create([, minDisparity[, numDisparities[, blockSize[, P1[, P2[, disp12MaxDiff[, preFilterCap[, uniquenessRatio[, speckleWindowSize[, speckleRange[, mode]]]]]]]]]]]) -> retval
cv.StereoSGBM_create([, minDisparity[, numDisparities[, blockSize[, P1[, P2[, disp12MaxDiff[, preFilterCap[, uniquenessRatio[, speckleWindowSize[, speckleRange[, mode]]]]]]]]]]]) -> retval

#include <opencv2/stereo.hpp>

Creates StereoSGBM object.

Parameters
minDisparityMinimum possible disparity value. Normally, it is zero but sometimes rectification algorithms can shift images, so this parameter needs to be adjusted accordingly.
numDisparitiesMaximum disparity minus minimum disparity. The value is always greater than zero. In the current implementation, this parameter must be divisible by 16.
blockSizeMatched block size. It must be an odd number >=1 . Normally, it should be somewhere in the 3..11 range.
P1The first parameter controlling the disparity smoothness. See below.
P2The second parameter controlling the disparity smoothness. The larger the values are, the smoother the disparity is. P1 is the penalty on the disparity change by plus or minus 1 between neighbor pixels. P2 is the penalty on the disparity change by more than 1 between neighbor pixels. The algorithm requires P2 > P1 . See stereo_match.cpp sample where some reasonably good P1 and P2 values are shown (like 8*number_of_image_channels*blockSize*blockSize and 32*number_of_image_channels*blockSize*blockSize , respectively).
disp12MaxDiffMaximum allowed difference (in integer pixel units) in the left-right disparity check. Set it to a non-positive value to disable the check.
preFilterCapTruncation value for the prefiltered image pixels. The algorithm first computes x-derivative at each pixel and clips its value by [-preFilterCap, preFilterCap] interval. The result values are passed to the Birchfield-Tomasi pixel cost function.
uniquenessRatioMargin in percentage by which the best (minimum) computed cost function value should "win" the second best value to consider the found match correct. Normally, a value within the 5-15 range is good enough.
speckleWindowSizeMaximum size of smooth disparity regions to consider their noise speckles and invalidate. Set it to 0 to disable speckle filtering. Otherwise, set it somewhere in the 50-200 range.
speckleRangeMaximum disparity variation within each connected component. If you do speckle filtering, set the parameter to a positive value, it will be implicitly multiplied by 16. Normally, 1 or 2 is good enough.
modeSet it to StereoSGBM::MODE_HH to run the full-scale two-pass dynamic programming algorithm. It will consume O(W*H*numDisparities) bytes, which is large for 640x480 stereo and huge for HD-size pictures. By default, it is set to StereoSGBM::MODE_SGBM .

The first constructor initializes StereoSGBM with all the default parameters. So, you only have to set StereoSGBM::numDisparities at minimum. The second constructor enables you to set each parameter to a custom value.

◆ create() [2/2]

static Ptr< StereoBM > cv::StereoBM::create ( int  numDisparities = 0,
int  blockSize = 21 
)
static
Python:
cv.StereoBM.create([, numDisparities[, blockSize]]) -> retval
cv.StereoBM_create([, numDisparities[, blockSize]]) -> retval

#include <opencv2/stereo.hpp>

Creates StereoBM object.

Parameters
numDisparitiesthe disparity search range. For each pixel algorithm will find the best disparity from 0 (default minimum disparity) to numDisparities. The search range can then be shifted by changing the minimum disparity.
blockSizethe linear size of the blocks compared by the algorithm. The size should be odd (as the block is centered at the current pixel). Larger block size implies smoother, though less accurate disparity map. Smaller block size gives more detailed disparity map, but there is higher chance for algorithm to find a wrong correspondence.

The function creates a StereoBM object. You can then call StereoBM::compute() to compute disparity for a specific stereo pair.

◆ filterSpeckles()

void cv::filterSpeckles ( InputOutputArray  img,
double  newVal,
int  maxSpeckleSize,
double  maxDiff,
InputOutputArray  buf = noArray() 
)
Python:
cv.filterSpeckles(img, newVal, maxSpeckleSize, maxDiff[, buf]) -> img, buf

#include <opencv2/stereo.hpp>

Filters off small noise blobs (speckles) in the disparity map.

Parameters
imgThe input 16-bit signed disparity image
newValThe disparity value used to paint-off the speckles
maxSpeckleSizeThe maximum speckle size to consider it a speckle. Larger blobs are not affected by the algorithm
maxDiffMaximum difference between neighbor disparity pixels to put them into the same blob. Note that since StereoBM, StereoSGBM and maybe other algorithms return a fixed-point disparity map, where disparity values are multiplied by 16, this scale factor should be taken into account when specifying this parameter value.
bufThe optional temporary buffer to avoid memory allocation within the function.

◆ getBlockSize()

virtual int cv::StereoMatcher::getBlockSize ( ) const
pure virtual
Python:
cv.StereoMatcher.getBlockSize() -> retval

#include <opencv2/stereo.hpp>

◆ getDisp12MaxDiff()

virtual int cv::StereoMatcher::getDisp12MaxDiff ( ) const
pure virtual
Python:
cv.StereoMatcher.getDisp12MaxDiff() -> retval

#include <opencv2/stereo.hpp>

◆ getMinDisparity()

virtual int cv::StereoMatcher::getMinDisparity ( ) const
pure virtual
Python:
cv.StereoMatcher.getMinDisparity() -> retval

#include <opencv2/stereo.hpp>

◆ getMode()

virtual int cv::StereoSGBM::getMode ( ) const
pure virtual
Python:
cv.StereoSGBM.getMode() -> retval

#include <opencv2/stereo.hpp>

◆ getNumDisparities()

virtual int cv::StereoMatcher::getNumDisparities ( ) const
pure virtual
Python:
cv.StereoMatcher.getNumDisparities() -> retval

#include <opencv2/stereo.hpp>

◆ getP1()

virtual int cv::StereoSGBM::getP1 ( ) const
pure virtual
Python:
cv.StereoSGBM.getP1() -> retval

#include <opencv2/stereo.hpp>

◆ getP2()

virtual int cv::StereoSGBM::getP2 ( ) const
pure virtual
Python:
cv.StereoSGBM.getP2() -> retval

#include <opencv2/stereo.hpp>

◆ getPreFilterCap() [1/2]

virtual int cv::StereoBM::getPreFilterCap ( ) const
pure virtual
Python:
cv.StereoBM.getPreFilterCap() -> retval

#include <opencv2/stereo.hpp>

◆ getPreFilterCap() [2/2]

virtual int cv::StereoSGBM::getPreFilterCap ( ) const
pure virtual
Python:
cv.StereoSGBM.getPreFilterCap() -> retval

#include <opencv2/stereo.hpp>

◆ getPreFilterSize()

virtual int cv::StereoBM::getPreFilterSize ( ) const
pure virtual
Python:
cv.StereoBM.getPreFilterSize() -> retval

#include <opencv2/stereo.hpp>

◆ getPreFilterType()

virtual int cv::StereoBM::getPreFilterType ( ) const
pure virtual
Python:
cv.StereoBM.getPreFilterType() -> retval

#include <opencv2/stereo.hpp>

◆ getROI1()

virtual Rect cv::StereoBM::getROI1 ( ) const
pure virtual
Python:
cv.StereoBM.getROI1() -> retval

#include <opencv2/stereo.hpp>

◆ getROI2()

virtual Rect cv::StereoBM::getROI2 ( ) const
pure virtual
Python:
cv.StereoBM.getROI2() -> retval

#include <opencv2/stereo.hpp>

◆ getSmallerBlockSize()

virtual int cv::StereoBM::getSmallerBlockSize ( ) const
pure virtual
Python:
cv.StereoBM.getSmallerBlockSize() -> retval

#include <opencv2/stereo.hpp>

◆ getSpeckleRange()

virtual int cv::StereoMatcher::getSpeckleRange ( ) const
pure virtual
Python:
cv.StereoMatcher.getSpeckleRange() -> retval

#include <opencv2/stereo.hpp>

◆ getSpeckleWindowSize()

virtual int cv::StereoMatcher::getSpeckleWindowSize ( ) const
pure virtual
Python:
cv.StereoMatcher.getSpeckleWindowSize() -> retval

#include <opencv2/stereo.hpp>

◆ getTextureThreshold()

virtual int cv::StereoBM::getTextureThreshold ( ) const
pure virtual
Python:
cv.StereoBM.getTextureThreshold() -> retval

#include <opencv2/stereo.hpp>

◆ getUniquenessRatio() [1/2]

virtual int cv::StereoBM::getUniquenessRatio ( ) const
pure virtual
Python:
cv.StereoBM.getUniquenessRatio() -> retval

#include <opencv2/stereo.hpp>

◆ getUniquenessRatio() [2/2]

virtual int cv::StereoSGBM::getUniquenessRatio ( ) const
pure virtual
Python:
cv.StereoSGBM.getUniquenessRatio() -> retval

#include <opencv2/stereo.hpp>

◆ getValidDisparityROI()

Rect cv::getValidDisparityROI ( Rect  roi1,
Rect  roi2,
int  minDisparity,
int  numberOfDisparities,
int  blockSize 
)
Python:
cv.getValidDisparityROI(roi1, roi2, minDisparity, numberOfDisparities, blockSize) -> retval

#include <opencv2/stereo.hpp>

computes valid disparity ROI from the valid ROIs of the rectified images (that are returned by stereoRectify)

◆ rectify3Collinear()

float cv::rectify3Collinear ( InputArray  _cameraMatrix1,
InputArray  _distCoeffs1,
InputArray  _cameraMatrix2,
InputArray  _distCoeffs2,
InputArray  _cameraMatrix3,
InputArray  _distCoeffs3,
InputArrayOfArrays  _imgpt1,
InputArrayOfArrays  _imgpt3,
Size  imageSize,
InputArray  _Rmat12,
InputArray  _Tmat12,
InputArray  _Rmat13,
InputArray  _Tmat13,
OutputArray  _Rmat1,
OutputArray  _Rmat2,
OutputArray  _Rmat3,
OutputArray  _Pmat1,
OutputArray  _Pmat2,
OutputArray  _Pmat3,
OutputArray  _Qmat,
double  alpha,
Size  newImgSize,
Rect *roi1,
Rect *roi2,
int  flags 
)

#include <opencv2/stereo.hpp>

◆ reprojectImageTo3D()

void cv::reprojectImageTo3D ( InputArray  disparity,
OutputArray  _3dImage,
InputArray  Q,
bool  handleMissingValues = false,
int  ddepth = -1 
)
Python:
cv.reprojectImageTo3D(disparity, Q[, _3dImage[, handleMissingValues[, ddepth]]]) -> _3dImage

#include <opencv2/stereo.hpp>

Reprojects a disparity image to 3D space.

Parameters
disparityInput single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no fractional bits. If the disparity is 16-bit signed format, as computed by StereoBM or StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before being used here.
_3dImageOutput 3-channel floating-point image of the same size as disparity. Each element of _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one uses Q obtained by stereoRectify, then the returned points are represented in the first camera's rectified coordinate system.
Q\(4 \times 4\) perspective transformation matrix that can be obtained with stereoRectify.
handleMissingValuesIndicates, whether the function should handle missing values (i.e. points where the disparity was not computed). If handleMissingValues=true, then pixels with the minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed to 3D points with a very large Z value (currently set to 10000).
ddepthThe optional output array depth. If it is -1, the output image will have CV_32F depth. ddepth can also be set to CV_16S, CV_32S or CV_32F.

The function transforms a single-channel disparity map to a 3-channel image representing a 3D surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it computes:

\[\begin{bmatrix} X \\ Y \\ Z \\ W \end{bmatrix} = Q \begin{bmatrix} x \\ y \\ \texttt{disparity} (x,y) \\ 1 \end{bmatrix}.\]

See also
To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.

◆ setBlockSize()

virtual void cv::StereoMatcher::setBlockSize ( int  blockSize)
pure virtual
Python:
cv.StereoMatcher.setBlockSize(blockSize) -> None

#include <opencv2/stereo.hpp>

◆ setDisp12MaxDiff()

virtual void cv::StereoMatcher::setDisp12MaxDiff ( int  disp12MaxDiff)
pure virtual
Python:
cv.StereoMatcher.setDisp12MaxDiff(disp12MaxDiff) -> None

#include <opencv2/stereo.hpp>

◆ setMinDisparity()

virtual void cv::StereoMatcher::setMinDisparity ( int  minDisparity)
pure virtual
Python:
cv.StereoMatcher.setMinDisparity(minDisparity) -> None

#include <opencv2/stereo.hpp>

◆ setMode()

virtual void cv::StereoSGBM::setMode ( int  mode)
pure virtual
Python:
cv.StereoSGBM.setMode(mode) -> None

#include <opencv2/stereo.hpp>

◆ setNumDisparities()

virtual void cv::StereoMatcher::setNumDisparities ( int  numDisparities)
pure virtual
Python:
cv.StereoMatcher.setNumDisparities(numDisparities) -> None

#include <opencv2/stereo.hpp>

◆ setP1()

virtual void cv::StereoSGBM::setP1 ( int  P1)
pure virtual
Python:
cv.StereoSGBM.setP1(P1) -> None

#include <opencv2/stereo.hpp>

◆ setP2()

virtual void cv::StereoSGBM::setP2 ( int  P2)
pure virtual
Python:
cv.StereoSGBM.setP2(P2) -> None

#include <opencv2/stereo.hpp>

◆ setPreFilterCap() [1/2]

virtual void cv::StereoBM::setPreFilterCap ( int  preFilterCap)
pure virtual
Python:
cv.StereoBM.setPreFilterCap(preFilterCap) -> None

#include <opencv2/stereo.hpp>

◆ setPreFilterCap() [2/2]

virtual void cv::StereoSGBM::setPreFilterCap ( int  preFilterCap)
pure virtual
Python:
cv.StereoSGBM.setPreFilterCap(preFilterCap) -> None

#include <opencv2/stereo.hpp>

◆ setPreFilterSize()

virtual void cv::StereoBM::setPreFilterSize ( int  preFilterSize)
pure virtual
Python:
cv.StereoBM.setPreFilterSize(preFilterSize) -> None

#include <opencv2/stereo.hpp>

◆ setPreFilterType()

virtual void cv::StereoBM::setPreFilterType ( int  preFilterType)
pure virtual
Python:
cv.StereoBM.setPreFilterType(preFilterType) -> None

#include <opencv2/stereo.hpp>

◆ setROI1()

virtual void cv::StereoBM::setROI1 ( Rect  roi1)
pure virtual
Python:
cv.StereoBM.setROI1(roi1) -> None

#include <opencv2/stereo.hpp>

◆ setROI2()

virtual void cv::StereoBM::setROI2 ( Rect  roi2)
pure virtual
Python:
cv.StereoBM.setROI2(roi2) -> None

#include <opencv2/stereo.hpp>

◆ setSmallerBlockSize()

virtual void cv::StereoBM::setSmallerBlockSize ( int  blockSize)
pure virtual
Python:
cv.StereoBM.setSmallerBlockSize(blockSize) -> None

#include <opencv2/stereo.hpp>

◆ setSpeckleRange()

virtual void cv::StereoMatcher::setSpeckleRange ( int  speckleRange)
pure virtual
Python:
cv.StereoMatcher.setSpeckleRange(speckleRange) -> None

#include <opencv2/stereo.hpp>

◆ setSpeckleWindowSize()

virtual void cv::StereoMatcher::setSpeckleWindowSize ( int  speckleWindowSize)
pure virtual
Python:
cv.StereoMatcher.setSpeckleWindowSize(speckleWindowSize) -> None

#include <opencv2/stereo.hpp>

◆ setTextureThreshold()

virtual void cv::StereoBM::setTextureThreshold ( int  textureThreshold)
pure virtual
Python:
cv.StereoBM.setTextureThreshold(textureThreshold) -> None

#include <opencv2/stereo.hpp>

◆ setUniquenessRatio() [1/2]

virtual void cv::StereoBM::setUniquenessRatio ( int  uniquenessRatio)
pure virtual
Python:
cv.StereoBM.setUniquenessRatio(uniquenessRatio) -> None

#include <opencv2/stereo.hpp>

◆ setUniquenessRatio() [2/2]

virtual void cv::StereoSGBM::setUniquenessRatio ( int  uniquenessRatio)
pure virtual
Python:
cv.StereoSGBM.setUniquenessRatio(uniquenessRatio) -> None

#include <opencv2/stereo.hpp>

◆ stereoRectify() [1/2]

void cv::stereoRectify ( InputArray  cameraMatrix1,
InputArray  distCoeffs1,
InputArray  cameraMatrix2,
InputArray  distCoeffs2,
Size  imageSize,
InputArray  R,
InputArray  T,
OutputArray  R1,
OutputArray  R2,
OutputArray  P1,
OutputArray  P2,
OutputArray  Q,
int  flags = STEREO_ZERO_DISPARITY,
double  alpha = -1,
Size  newImageSize = Size(),
Rect *validPixROI1 = 0,
Rect *validPixROI2 = 0 
)
Python:
cv.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T[, R1[, R2[, P1[, P2[, Q[, flags[, alpha[, newImageSize]]]]]]]]) -> R1, R2, P1, P2, Q, validPixROI1, validPixROI2

#include <opencv2/stereo.hpp>

Computes rectification transforms for each head of a calibrated stereo camera.

Parameters
cameraMatrix1First camera intrinsic matrix.
distCoeffs1First camera distortion parameters.
cameraMatrix2Second camera intrinsic matrix.
distCoeffs2Second camera distortion parameters.
imageSizeSize of the image used for stereo calibration.
RRotation matrix from the coordinate system of the first camera to the second camera, see stereoCalibrate.
TTranslation vector from the coordinate system of the first camera to the second camera, see stereoCalibrate.
R1Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix brings points given in the unrectified first camera's coordinate system to points in the rectified first camera's coordinate system. In more technical terms, it performs a change of basis from the unrectified first camera's coordinate system to the rectified first camera's coordinate system.
R2Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix brings points given in the unrectified second camera's coordinate system to points in the rectified second camera's coordinate system. In more technical terms, it performs a change of basis from the unrectified second camera's coordinate system to the rectified second camera's coordinate system.
P1Output 3x4 projection matrix in the new (rectified) coordinate systems for the first camera, i.e. it projects points given in the rectified first camera coordinate system into the rectified first camera's image.
P2Output 3x4 projection matrix in the new (rectified) coordinate systems for the second camera, i.e. it projects points given in the rectified first camera coordinate system into the rectified second camera's image.
QOutput \(4 \times 4\) disparity-to-depth mapping matrix (see reprojectImageTo3D).
flagsOperation flags that may be zero or STEREO_ZERO_DISPARITY . If the flag is set, the function makes the principal points of each camera have the same pixel coordinates in the rectified views. And if the flag is not set, the function may still shift the images in the horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the useful image area.
alphaFree scaling parameter. If it is -1 or absent, the function performs the default scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified images are zoomed and shifted so that only valid pixels are visible (no black areas after rectification). alpha=1 means that the rectified image is decimated and shifted so that all the pixels from the original images from the cameras are retained in the rectified images (no source image pixels are lost). Any intermediate value yields an intermediate result between those two extreme cases.
newImageSizeNew image resolution after rectification. The same size should be passed to initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0) is passed (default), it is set to the original imageSize . Setting it to a larger value can help you preserve details in the original image, especially when there is a big radial distortion.
validPixROI1Optional output rectangles inside the rectified images where all the pixels are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller (see the picture below).
validPixROI2Optional output rectangles inside the rectified images where all the pixels are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller (see the picture below).

The function computes the rotation matrices for each camera that (virtually) make both camera image planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies the dense stereo correspondence problem. The function takes the matrices computed by stereoCalibrate as input. As output, it provides two rotation matrices and also two projection matrices in the new coordinates. The function distinguishes the following two cases:

  • Horizontal stereo: the first and the second camera views are shifted relative to each other mainly along the x-axis (with possible small vertical shift). In the rectified images, the corresponding epipolar lines in the left and right cameras are horizontal and have the same y-coordinate. P1 and P2 look like:

\[\texttt{P1} = \begin{bmatrix} f & 0 & cx_1 & 0 \\ 0 & f & cy & 0 \\ 0 & 0 & 1 & 0 \end{bmatrix}\]

\[\texttt{P2} = \begin{bmatrix} f & 0 & cx_2 & T_x \cdot f \\ 0 & f & cy & 0 \\ 0 & 0 & 1 & 0 \end{bmatrix} ,\]

\[\texttt{Q} = \begin{bmatrix} 1 & 0 & 0 & -cx_1 \\ 0 & 1 & 0 & -cy \\ 0 & 0 & 0 & f \\ 0 & 0 & -\frac{1}{T_x} & \frac{cx_1 - cx_2}{T_x} \end{bmatrix} \]

where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if STEREO_ZERO_DISPARITY is set.

  • Vertical stereo: the first and the second camera views are shifted relative to each other mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:

\[\texttt{P1} = \begin{bmatrix} f & 0 & cx & 0 \\ 0 & f & cy_1 & 0 \\ 0 & 0 & 1 & 0 \end{bmatrix}\]

\[\texttt{P2} = \begin{bmatrix} f & 0 & cx & 0 \\ 0 & f & cy_2 & T_y \cdot f \\ 0 & 0 & 1 & 0 \end{bmatrix},\]

\[\texttt{Q} = \begin{bmatrix} 1 & 0 & 0 & -cx \\ 0 & 1 & 0 & -cy_1 \\ 0 & 0 & 0 & f \\ 0 & 0 & -\frac{1}{T_y} & \frac{cy_1 - cy_2}{T_y} \end{bmatrix} \]

where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if STEREO_ZERO_DISPARITY is set.

As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera matrices. The matrices, together with R1 and R2 , can then be passed to initUndistortRectifyMap to initialize the rectification map for each camera.

See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through the corresponding image regions. This means that the images are well rectified, which is what most stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that their interiors are all valid pixels.

image

◆ stereoRectify() [2/2]

void cv::fisheye::stereoRectify ( InputArray  K1,
InputArray  D1,
InputArray  K2,
InputArray  D2,
const Size &imageSize,
InputArray  R,
InputArray  tvec,
OutputArray  R1,
OutputArray  R2,
OutputArray  P1,
OutputArray  P2,
OutputArray  Q,
int  flags,
const Size &newImageSize = Size(),
double  balance = 0.0,
double  fov_scale = 1.0 
)
Python:
cv.fisheye.stereoRectify(K1, D1, K2, D2, imageSize, R, tvec, flags[, R1[, R2[, P1[, P2[, Q[, newImageSize[, balance[, fov_scale]]]]]]]]) -> R1, R2, P1, P2, Q

#include <opencv2/stereo.hpp>

Stereo rectification for fisheye camera model.

Parameters
K1First camera intrinsic matrix.
D1First camera distortion parameters.
K2Second camera intrinsic matrix.
D2Second camera distortion parameters.
imageSizeSize of the image used for stereo calibration.
RRotation matrix between the coordinate systems of the first and the second cameras.
tvecTranslation vector between coordinate systems of the cameras.
R1Output 3x3 rectification transform (rotation matrix) for the first camera.
R2Output 3x3 rectification transform (rotation matrix) for the second camera.
P1Output 3x4 projection matrix in the new (rectified) coordinate systems for the first camera.
P2Output 3x4 projection matrix in the new (rectified) coordinate systems for the second camera.
QOutput \(4 \times 4\) disparity-to-depth mapping matrix (see reprojectImageTo3D ).
flagsOperation flags that may be zero or cv::CALIB_ZERO_DISPARITY . If the flag is set, the function makes the principal points of each camera have the same pixel coordinates in the rectified views. And if the flag is not set, the function may still shift the images in the horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the useful image area.
newImageSizeNew image resolution after rectification. The same size should be passed to initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0) is passed (default), it is set to the original imageSize . Setting it to larger value can help you preserve details in the original image, especially when there is a big radial distortion.
balanceSets the new focal length in range between the min focal length and the max focal length. Balance is in range of [0, 1].
fov_scaleDivisor for new focal length.

◆ stereoRectifyUncalibrated()

bool cv::stereoRectifyUncalibrated ( InputArray  points1,
InputArray  points2,
InputArray  F,
Size  imgSize,
OutputArray  H1,
OutputArray  H2,
double  threshold = 5 
)
Python:
cv.stereoRectifyUncalibrated(points1, points2, F, imgSize[, H1[, H2[, threshold]]]) -> retval, H1, H2

#include <opencv2/stereo.hpp>

Computes a rectification transform for an uncalibrated stereo camera.

Parameters
points1Array of feature points in the first image.
points2The corresponding points in the second image. The same formats as in findFundamentalMat are supported.
FInput fundamental matrix. It can be computed from the same set of point pairs using findFundamentalMat .
imgSizeSize of the image.
H1Output rectification homography matrix for the first image.
H2Output rectification homography matrix for the second image.
thresholdOptional threshold used to filter out the outliers. If the parameter is greater than zero, all the point pairs that do not comply with the epipolar geometry (that is, the points for which \(|\texttt{points2[i]}^T \cdot \texttt{F} \cdot \texttt{points1[i]}|>\texttt{threshold}\) ) are rejected prior to computing the homographies. Otherwise, all the points are considered inliers.

The function computes the rectification transformations without knowing intrinsic parameters of the cameras and their relative position in the space, which explains the suffix "uncalibrated". Another related difference from stereoRectify is that the function outputs not the rectification transformations in the object (3D) space, but the planar perspective transformations encoded by the homography matrices H1 and H2 . The function implements the algorithm [119] .

Note
While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion, it would be better to correct it before computing the fundamental matrix and calling this function. For example, distortion coefficients can be estimated for each head of stereo camera separately by using calibrateCamera . Then, the images can be corrected using undistort , or just the point coordinates can be corrected with undistortPoints .

◆ validateDisparity()

void cv::validateDisparity ( InputOutputArray  disparity,
InputArray  cost,
int  minDisparity,
int  numberOfDisparities,
int  disp12MaxDisp = 1 
)
Python:
cv.validateDisparity(disparity, cost, minDisparity, numberOfDisparities[, disp12MaxDisp]) -> disparity

#include <opencv2/stereo.hpp>

validates disparity using the left-right check. The matrix "cost" should be computed by the stereo correspondence algorithm