OpenCV  3.4.0
Miscellaneous Image Transformations

Enumerations

enum  cv::AdaptiveThresholdTypes {
  cv::ADAPTIVE_THRESH_MEAN_C = 0,
  cv::ADAPTIVE_THRESH_GAUSSIAN_C = 1
}
 
enum  cv::ColorConversionCodes {
  cv::COLOR_BGR2BGRA = 0,
  cv::COLOR_RGB2RGBA = COLOR_BGR2BGRA,
  cv::COLOR_BGRA2BGR = 1,
  cv::COLOR_RGBA2RGB = COLOR_BGRA2BGR,
  cv::COLOR_BGR2RGBA = 2,
  cv::COLOR_RGB2BGRA = COLOR_BGR2RGBA,
  cv::COLOR_RGBA2BGR = 3,
  cv::COLOR_BGRA2RGB = COLOR_RGBA2BGR,
  cv::COLOR_BGR2RGB = 4,
  cv::COLOR_RGB2BGR = COLOR_BGR2RGB,
  cv::COLOR_BGRA2RGBA = 5,
  cv::COLOR_RGBA2BGRA = COLOR_BGRA2RGBA,
  cv::COLOR_BGR2GRAY = 6,
  cv::COLOR_RGB2GRAY = 7,
  cv::COLOR_GRAY2BGR = 8,
  cv::COLOR_GRAY2RGB = COLOR_GRAY2BGR,
  cv::COLOR_GRAY2BGRA = 9,
  cv::COLOR_GRAY2RGBA = COLOR_GRAY2BGRA,
  cv::COLOR_BGRA2GRAY = 10,
  cv::COLOR_RGBA2GRAY = 11,
  cv::COLOR_BGR2BGR565 = 12,
  cv::COLOR_RGB2BGR565 = 13,
  cv::COLOR_BGR5652BGR = 14,
  cv::COLOR_BGR5652RGB = 15,
  cv::COLOR_BGRA2BGR565 = 16,
  cv::COLOR_RGBA2BGR565 = 17,
  cv::COLOR_BGR5652BGRA = 18,
  cv::COLOR_BGR5652RGBA = 19,
  cv::COLOR_GRAY2BGR565 = 20,
  cv::COLOR_BGR5652GRAY = 21,
  cv::COLOR_BGR2BGR555 = 22,
  cv::COLOR_RGB2BGR555 = 23,
  cv::COLOR_BGR5552BGR = 24,
  cv::COLOR_BGR5552RGB = 25,
  cv::COLOR_BGRA2BGR555 = 26,
  cv::COLOR_RGBA2BGR555 = 27,
  cv::COLOR_BGR5552BGRA = 28,
  cv::COLOR_BGR5552RGBA = 29,
  cv::COLOR_GRAY2BGR555 = 30,
  cv::COLOR_BGR5552GRAY = 31,
  cv::COLOR_BGR2XYZ = 32,
  cv::COLOR_RGB2XYZ = 33,
  cv::COLOR_XYZ2BGR = 34,
  cv::COLOR_XYZ2RGB = 35,
  cv::COLOR_BGR2YCrCb = 36,
  cv::COLOR_RGB2YCrCb = 37,
  cv::COLOR_YCrCb2BGR = 38,
  cv::COLOR_YCrCb2RGB = 39,
  cv::COLOR_BGR2HSV = 40,
  cv::COLOR_RGB2HSV = 41,
  cv::COLOR_BGR2Lab = 44,
  cv::COLOR_RGB2Lab = 45,
  cv::COLOR_BGR2Luv = 50,
  cv::COLOR_RGB2Luv = 51,
  cv::COLOR_BGR2HLS = 52,
  cv::COLOR_RGB2HLS = 53,
  cv::COLOR_HSV2BGR = 54,
  cv::COLOR_HSV2RGB = 55,
  cv::COLOR_Lab2BGR = 56,
  cv::COLOR_Lab2RGB = 57,
  cv::COLOR_Luv2BGR = 58,
  cv::COLOR_Luv2RGB = 59,
  cv::COLOR_HLS2BGR = 60,
  cv::COLOR_HLS2RGB = 61,
  cv::COLOR_BGR2HSV_FULL = 66,
  cv::COLOR_RGB2HSV_FULL = 67,
  cv::COLOR_BGR2HLS_FULL = 68,
  cv::COLOR_RGB2HLS_FULL = 69,
  cv::COLOR_HSV2BGR_FULL = 70,
  cv::COLOR_HSV2RGB_FULL = 71,
  cv::COLOR_HLS2BGR_FULL = 72,
  cv::COLOR_HLS2RGB_FULL = 73,
  cv::COLOR_LBGR2Lab = 74,
  cv::COLOR_LRGB2Lab = 75,
  cv::COLOR_LBGR2Luv = 76,
  cv::COLOR_LRGB2Luv = 77,
  cv::COLOR_Lab2LBGR = 78,
  cv::COLOR_Lab2LRGB = 79,
  cv::COLOR_Luv2LBGR = 80,
  cv::COLOR_Luv2LRGB = 81,
  cv::COLOR_BGR2YUV = 82,
  cv::COLOR_RGB2YUV = 83,
  cv::COLOR_YUV2BGR = 84,
  cv::COLOR_YUV2RGB = 85,
  cv::COLOR_YUV2RGB_NV12 = 90,
  cv::COLOR_YUV2BGR_NV12 = 91,
  cv::COLOR_YUV2RGB_NV21 = 92,
  cv::COLOR_YUV2BGR_NV21 = 93,
  cv::COLOR_YUV420sp2RGB = COLOR_YUV2RGB_NV21,
  cv::COLOR_YUV420sp2BGR = COLOR_YUV2BGR_NV21,
  cv::COLOR_YUV2RGBA_NV12 = 94,
  cv::COLOR_YUV2BGRA_NV12 = 95,
  cv::COLOR_YUV2RGBA_NV21 = 96,
  cv::COLOR_YUV2BGRA_NV21 = 97,
  cv::COLOR_YUV420sp2RGBA = COLOR_YUV2RGBA_NV21,
  cv::COLOR_YUV420sp2BGRA = COLOR_YUV2BGRA_NV21,
  cv::COLOR_YUV2RGB_YV12 = 98,
  cv::COLOR_YUV2BGR_YV12 = 99,
  cv::COLOR_YUV2RGB_IYUV = 100,
  cv::COLOR_YUV2BGR_IYUV = 101,
  cv::COLOR_YUV2RGB_I420 = COLOR_YUV2RGB_IYUV,
  cv::COLOR_YUV2BGR_I420 = COLOR_YUV2BGR_IYUV,
  cv::COLOR_YUV420p2RGB = COLOR_YUV2RGB_YV12,
  cv::COLOR_YUV420p2BGR = COLOR_YUV2BGR_YV12,
  cv::COLOR_YUV2RGBA_YV12 = 102,
  cv::COLOR_YUV2BGRA_YV12 = 103,
  cv::COLOR_YUV2RGBA_IYUV = 104,
  cv::COLOR_YUV2BGRA_IYUV = 105,
  cv::COLOR_YUV2RGBA_I420 = COLOR_YUV2RGBA_IYUV,
  cv::COLOR_YUV2BGRA_I420 = COLOR_YUV2BGRA_IYUV,
  cv::COLOR_YUV420p2RGBA = COLOR_YUV2RGBA_YV12,
  cv::COLOR_YUV420p2BGRA = COLOR_YUV2BGRA_YV12,
  cv::COLOR_YUV2GRAY_420 = 106,
  cv::COLOR_YUV2GRAY_NV21 = COLOR_YUV2GRAY_420,
  cv::COLOR_YUV2GRAY_NV12 = COLOR_YUV2GRAY_420,
  cv::COLOR_YUV2GRAY_YV12 = COLOR_YUV2GRAY_420,
  cv::COLOR_YUV2GRAY_IYUV = COLOR_YUV2GRAY_420,
  cv::COLOR_YUV2GRAY_I420 = COLOR_YUV2GRAY_420,
  cv::COLOR_YUV420sp2GRAY = COLOR_YUV2GRAY_420,
  cv::COLOR_YUV420p2GRAY = COLOR_YUV2GRAY_420,
  cv::COLOR_YUV2RGB_UYVY = 107,
  cv::COLOR_YUV2BGR_UYVY = 108,
  cv::COLOR_YUV2RGB_Y422 = COLOR_YUV2RGB_UYVY,
  cv::COLOR_YUV2BGR_Y422 = COLOR_YUV2BGR_UYVY,
  cv::COLOR_YUV2RGB_UYNV = COLOR_YUV2RGB_UYVY,
  cv::COLOR_YUV2BGR_UYNV = COLOR_YUV2BGR_UYVY,
  cv::COLOR_YUV2RGBA_UYVY = 111,
  cv::COLOR_YUV2BGRA_UYVY = 112,
  cv::COLOR_YUV2RGBA_Y422 = COLOR_YUV2RGBA_UYVY,
  cv::COLOR_YUV2BGRA_Y422 = COLOR_YUV2BGRA_UYVY,
  cv::COLOR_YUV2RGBA_UYNV = COLOR_YUV2RGBA_UYVY,
  cv::COLOR_YUV2BGRA_UYNV = COLOR_YUV2BGRA_UYVY,
  cv::COLOR_YUV2RGB_YUY2 = 115,
  cv::COLOR_YUV2BGR_YUY2 = 116,
  cv::COLOR_YUV2RGB_YVYU = 117,
  cv::COLOR_YUV2BGR_YVYU = 118,
  cv::COLOR_YUV2RGB_YUYV = COLOR_YUV2RGB_YUY2,
  cv::COLOR_YUV2BGR_YUYV = COLOR_YUV2BGR_YUY2,
  cv::COLOR_YUV2RGB_YUNV = COLOR_YUV2RGB_YUY2,
  cv::COLOR_YUV2BGR_YUNV = COLOR_YUV2BGR_YUY2,
  cv::COLOR_YUV2RGBA_YUY2 = 119,
  cv::COLOR_YUV2BGRA_YUY2 = 120,
  cv::COLOR_YUV2RGBA_YVYU = 121,
  cv::COLOR_YUV2BGRA_YVYU = 122,
  cv::COLOR_YUV2RGBA_YUYV = COLOR_YUV2RGBA_YUY2,
  cv::COLOR_YUV2BGRA_YUYV = COLOR_YUV2BGRA_YUY2,
  cv::COLOR_YUV2RGBA_YUNV = COLOR_YUV2RGBA_YUY2,
  cv::COLOR_YUV2BGRA_YUNV = COLOR_YUV2BGRA_YUY2,
  cv::COLOR_YUV2GRAY_UYVY = 123,
  cv::COLOR_YUV2GRAY_YUY2 = 124,
  cv::COLOR_YUV2GRAY_Y422 = COLOR_YUV2GRAY_UYVY,
  cv::COLOR_YUV2GRAY_UYNV = COLOR_YUV2GRAY_UYVY,
  cv::COLOR_YUV2GRAY_YVYU = COLOR_YUV2GRAY_YUY2,
  cv::COLOR_YUV2GRAY_YUYV = COLOR_YUV2GRAY_YUY2,
  cv::COLOR_YUV2GRAY_YUNV = COLOR_YUV2GRAY_YUY2,
  cv::COLOR_RGBA2mRGBA = 125,
  cv::COLOR_mRGBA2RGBA = 126,
  cv::COLOR_RGB2YUV_I420 = 127,
  cv::COLOR_BGR2YUV_I420 = 128,
  cv::COLOR_RGB2YUV_IYUV = COLOR_RGB2YUV_I420,
  cv::COLOR_BGR2YUV_IYUV = COLOR_BGR2YUV_I420,
  cv::COLOR_RGBA2YUV_I420 = 129,
  cv::COLOR_BGRA2YUV_I420 = 130,
  cv::COLOR_RGBA2YUV_IYUV = COLOR_RGBA2YUV_I420,
  cv::COLOR_BGRA2YUV_IYUV = COLOR_BGRA2YUV_I420,
  cv::COLOR_RGB2YUV_YV12 = 131,
  cv::COLOR_BGR2YUV_YV12 = 132,
  cv::COLOR_RGBA2YUV_YV12 = 133,
  cv::COLOR_BGRA2YUV_YV12 = 134,
  cv::COLOR_BayerBG2BGR = 46,
  cv::COLOR_BayerGB2BGR = 47,
  cv::COLOR_BayerRG2BGR = 48,
  cv::COLOR_BayerGR2BGR = 49,
  cv::COLOR_BayerBG2RGB = COLOR_BayerRG2BGR,
  cv::COLOR_BayerGB2RGB = COLOR_BayerGR2BGR,
  cv::COLOR_BayerRG2RGB = COLOR_BayerBG2BGR,
  cv::COLOR_BayerGR2RGB = COLOR_BayerGB2BGR,
  cv::COLOR_BayerBG2GRAY = 86,
  cv::COLOR_BayerGB2GRAY = 87,
  cv::COLOR_BayerRG2GRAY = 88,
  cv::COLOR_BayerGR2GRAY = 89,
  cv::COLOR_BayerBG2BGR_VNG = 62,
  cv::COLOR_BayerGB2BGR_VNG = 63,
  cv::COLOR_BayerRG2BGR_VNG = 64,
  cv::COLOR_BayerGR2BGR_VNG = 65,
  cv::COLOR_BayerBG2RGB_VNG = COLOR_BayerRG2BGR_VNG,
  cv::COLOR_BayerGB2RGB_VNG = COLOR_BayerGR2BGR_VNG,
  cv::COLOR_BayerRG2RGB_VNG = COLOR_BayerBG2BGR_VNG,
  cv::COLOR_BayerGR2RGB_VNG = COLOR_BayerGB2BGR_VNG,
  cv::COLOR_BayerBG2BGR_EA = 135,
  cv::COLOR_BayerGB2BGR_EA = 136,
  cv::COLOR_BayerRG2BGR_EA = 137,
  cv::COLOR_BayerGR2BGR_EA = 138,
  cv::COLOR_BayerBG2RGB_EA = COLOR_BayerRG2BGR_EA,
  cv::COLOR_BayerGB2RGB_EA = COLOR_BayerGR2BGR_EA,
  cv::COLOR_BayerRG2RGB_EA = COLOR_BayerBG2BGR_EA,
  cv::COLOR_BayerGR2RGB_EA = COLOR_BayerGB2BGR_EA,
  cv::COLOR_BayerBG2BGRA = 139,
  cv::COLOR_BayerGB2BGRA = 140,
  cv::COLOR_BayerRG2BGRA = 141,
  cv::COLOR_BayerGR2BGRA = 142,
  cv::COLOR_BayerBG2RGBA = COLOR_BayerRG2BGRA,
  cv::COLOR_BayerGB2RGBA = COLOR_BayerGR2BGRA,
  cv::COLOR_BayerRG2RGBA = COLOR_BayerBG2BGRA,
  cv::COLOR_BayerGR2RGBA = COLOR_BayerGB2BGRA,
  cv::COLOR_COLORCVT_MAX = 143
}
 
enum  cv::DistanceTransformLabelTypes {
  cv::DIST_LABEL_CCOMP = 0,
  cv::DIST_LABEL_PIXEL = 1
}
 distanceTransform algorithm flags
 
enum  cv::DistanceTransformMasks {
  cv::DIST_MASK_3 = 3,
  cv::DIST_MASK_5 = 5,
  cv::DIST_MASK_PRECISE = 0
}
 Mask size for distance transform.
 
enum  cv::DistanceTypes {
  cv::DIST_USER = -1,
  cv::DIST_L1 = 1,
  cv::DIST_L2 = 2,
  cv::DIST_C = 3,
  cv::DIST_L12 = 4,
  cv::DIST_FAIR = 5,
  cv::DIST_WELSCH = 6,
  cv::DIST_HUBER = 7
}
 
enum  cv::FloodFillFlags {
  cv::FLOODFILL_FIXED_RANGE = 1 << 16,
  cv::FLOODFILL_MASK_ONLY = 1 << 17
}
 floodfill algorithm flags
 
enum  cv::GrabCutClasses {
  cv::GC_BGD = 0,
  cv::GC_FGD = 1,
  cv::GC_PR_BGD = 2,
  cv::GC_PR_FGD = 3
}
 class of the pixel in GrabCut algorithm
 
enum  cv::GrabCutModes {
  cv::GC_INIT_WITH_RECT = 0,
  cv::GC_INIT_WITH_MASK = 1,
  cv::GC_EVAL = 2
}
 GrabCut algorithm flags.
 
enum  cv::ThresholdTypes {
  cv::THRESH_BINARY = 0,
  cv::THRESH_BINARY_INV = 1,
  cv::THRESH_TRUNC = 2,
  cv::THRESH_TOZERO = 3,
  cv::THRESH_TOZERO_INV = 4,
  cv::THRESH_MASK = 7,
  cv::THRESH_OTSU = 8,
  cv::THRESH_TRIANGLE = 16
}
 
enum  cv::UndistortTypes {
  cv::PROJ_SPHERICAL_ORTHO = 0,
  cv::PROJ_SPHERICAL_EQRECT = 1
}
 cv::undistort mode
 

Functions

void cv::adaptiveThreshold (InputArray src, OutputArray dst, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C)
 Applies an adaptive threshold to an array.
 
void cv::cvtColor (InputArray src, OutputArray dst, int code, int dstCn=0)
 Converts an image from one color space to another.
 
void cv::distanceTransform (InputArray src, OutputArray dst, OutputArray labels, int distanceType, int maskSize, int labelType=DIST_LABEL_CCOMP)
 Calculates the distance to the closest zero pixel for each pixel of the source image.
 
void cv::distanceTransform (InputArray src, OutputArray dst, int distanceType, int maskSize, int dstType=CV_32F)
 
int cv::floodFill (InputOutputArray image, Point seedPoint, Scalar newVal, Rect *rect=0, Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), int flags=4)
 
int cv::floodFill (InputOutputArray image, InputOutputArray mask, Point seedPoint, Scalar newVal, Rect *rect=0, Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), int flags=4)
 Fills a connected component with the given color.
 
void cv::grabCut (InputArray img, InputOutputArray mask, Rect rect, InputOutputArray bgdModel, InputOutputArray fgdModel, int iterCount, int mode=GC_EVAL)
 Runs the GrabCut algorithm.
 
void cv::integral (InputArray src, OutputArray sum, int sdepth=-1)
 
void cv::integral (InputArray src, OutputArray sum, OutputArray sqsum, int sdepth=-1, int sqdepth=-1)
 
void cv::integral (InputArray src, OutputArray sum, OutputArray sqsum, OutputArray tilted, int sdepth=-1, int sqdepth=-1)
 Calculates the integral of an image.
 
double cv::threshold (InputArray src, OutputArray dst, double thresh, double maxval, int type)
 Applies a fixed-level threshold to each array element.
 
void cv::watershed (InputArray image, InputOutputArray markers)
 Performs a marker-based image segmentation using the watershed algorithm.
 

Enumeration Type Documentation

§ AdaptiveThresholdTypes

adaptive threshold algorithm, see cv::adaptiveThreshold

Enumerator
ADAPTIVE_THRESH_MEAN_C 
Python: cv.ADAPTIVE_THRESH_MEAN_C

the threshold value \(T(x,y)\) is a mean of the \(\texttt{blockSize} \times \texttt{blockSize}\) neighborhood of \((x, y)\) minus C

ADAPTIVE_THRESH_GAUSSIAN_C 
Python: cv.ADAPTIVE_THRESH_GAUSSIAN_C

the threshold value \(T(x, y)\) is a weighted sum (cross-correlation with a Gaussian window) of the \(\texttt{blockSize} \times \texttt{blockSize}\) neighborhood of \((x, y)\) minus C. The default sigma (standard deviation) is used for the specified blockSize. See cv::getGaussianKernel.

§ ColorConversionCodes

the color conversion code

See also
Color conversions
Enumerator
COLOR_BGR2BGRA 
Python: cv.COLOR_BGR2BGRA

add alpha channel to RGB or BGR image

COLOR_RGB2RGBA 
Python: cv.COLOR_RGB2RGBA
COLOR_BGRA2BGR 
Python: cv.COLOR_BGRA2BGR

remove alpha channel from RGB or BGR image

COLOR_RGBA2RGB 
Python: cv.COLOR_RGBA2RGB
COLOR_BGR2RGBA 
Python: cv.COLOR_BGR2RGBA

convert between RGB and BGR color spaces (with or without alpha channel)

COLOR_RGB2BGRA 
Python: cv.COLOR_RGB2BGRA
COLOR_RGBA2BGR 
Python: cv.COLOR_RGBA2BGR
COLOR_BGRA2RGB 
Python: cv.COLOR_BGRA2RGB
COLOR_BGR2RGB 
Python: cv.COLOR_BGR2RGB
COLOR_RGB2BGR 
Python: cv.COLOR_RGB2BGR
COLOR_BGRA2RGBA 
Python: cv.COLOR_BGRA2RGBA
COLOR_RGBA2BGRA 
Python: cv.COLOR_RGBA2BGRA
COLOR_BGR2GRAY 
Python: cv.COLOR_BGR2GRAY

convert between RGB/BGR and grayscale, color conversions

COLOR_RGB2GRAY 
Python: cv.COLOR_RGB2GRAY
COLOR_GRAY2BGR 
Python: cv.COLOR_GRAY2BGR
COLOR_GRAY2RGB 
Python: cv.COLOR_GRAY2RGB
COLOR_GRAY2BGRA 
Python: cv.COLOR_GRAY2BGRA
COLOR_GRAY2RGBA 
Python: cv.COLOR_GRAY2RGBA
COLOR_BGRA2GRAY 
Python: cv.COLOR_BGRA2GRAY
COLOR_RGBA2GRAY 
Python: cv.COLOR_RGBA2GRAY
COLOR_BGR2BGR565 
Python: cv.COLOR_BGR2BGR565

convert between RGB/BGR and BGR565 (16-bit images)

COLOR_RGB2BGR565 
Python: cv.COLOR_RGB2BGR565
COLOR_BGR5652BGR 
Python: cv.COLOR_BGR5652BGR
COLOR_BGR5652RGB 
Python: cv.COLOR_BGR5652RGB
COLOR_BGRA2BGR565 
Python: cv.COLOR_BGRA2BGR565
COLOR_RGBA2BGR565 
Python: cv.COLOR_RGBA2BGR565
COLOR_BGR5652BGRA 
Python: cv.COLOR_BGR5652BGRA
COLOR_BGR5652RGBA 
Python: cv.COLOR_BGR5652RGBA
COLOR_GRAY2BGR565 
Python: cv.COLOR_GRAY2BGR565

convert between grayscale and BGR565 (16-bit images)

COLOR_BGR5652GRAY 
Python: cv.COLOR_BGR5652GRAY
COLOR_BGR2BGR555 
Python: cv.COLOR_BGR2BGR555

convert between RGB/BGR and BGR555 (16-bit images)

COLOR_RGB2BGR555 
Python: cv.COLOR_RGB2BGR555
COLOR_BGR5552BGR 
Python: cv.COLOR_BGR5552BGR
COLOR_BGR5552RGB 
Python: cv.COLOR_BGR5552RGB
COLOR_BGRA2BGR555 
Python: cv.COLOR_BGRA2BGR555
COLOR_RGBA2BGR555 
Python: cv.COLOR_RGBA2BGR555
COLOR_BGR5552BGRA 
Python: cv.COLOR_BGR5552BGRA
COLOR_BGR5552RGBA 
Python: cv.COLOR_BGR5552RGBA
COLOR_GRAY2BGR555 
Python: cv.COLOR_GRAY2BGR555

convert between grayscale and BGR555 (16-bit images)

COLOR_BGR5552GRAY 
Python: cv.COLOR_BGR5552GRAY
COLOR_BGR2XYZ 
Python: cv.COLOR_BGR2XYZ

convert RGB/BGR to CIE XYZ, color conversions

COLOR_RGB2XYZ 
Python: cv.COLOR_RGB2XYZ
COLOR_XYZ2BGR 
Python: cv.COLOR_XYZ2BGR
COLOR_XYZ2RGB 
Python: cv.COLOR_XYZ2RGB
COLOR_BGR2YCrCb 
Python: cv.COLOR_BGR2YCrCb

convert RGB/BGR to luma-chroma (aka YCC), color conversions

COLOR_RGB2YCrCb 
Python: cv.COLOR_RGB2YCrCb
COLOR_YCrCb2BGR 
Python: cv.COLOR_YCrCb2BGR
COLOR_YCrCb2RGB 
Python: cv.COLOR_YCrCb2RGB
COLOR_BGR2HSV 
Python: cv.COLOR_BGR2HSV

convert RGB/BGR to HSV (hue saturation value), color conversions

COLOR_RGB2HSV 
Python: cv.COLOR_RGB2HSV
COLOR_BGR2Lab 
Python: cv.COLOR_BGR2Lab

convert RGB/BGR to CIE Lab, color conversions

COLOR_RGB2Lab 
Python: cv.COLOR_RGB2Lab
COLOR_BGR2Luv 
Python: cv.COLOR_BGR2Luv

convert RGB/BGR to CIE Luv, color conversions

COLOR_RGB2Luv 
Python: cv.COLOR_RGB2Luv
COLOR_BGR2HLS 
Python: cv.COLOR_BGR2HLS

convert RGB/BGR to HLS (hue lightness saturation), color conversions

COLOR_RGB2HLS 
Python: cv.COLOR_RGB2HLS
COLOR_HSV2BGR 
Python: cv.COLOR_HSV2BGR

backward conversions to RGB/BGR

COLOR_HSV2RGB 
Python: cv.COLOR_HSV2RGB
COLOR_Lab2BGR 
Python: cv.COLOR_Lab2BGR
COLOR_Lab2RGB 
Python: cv.COLOR_Lab2RGB
COLOR_Luv2BGR 
Python: cv.COLOR_Luv2BGR
COLOR_Luv2RGB 
Python: cv.COLOR_Luv2RGB
COLOR_HLS2BGR 
Python: cv.COLOR_HLS2BGR
COLOR_HLS2RGB 
Python: cv.COLOR_HLS2RGB
COLOR_BGR2HSV_FULL 
Python: cv.COLOR_BGR2HSV_FULL
COLOR_RGB2HSV_FULL 
Python: cv.COLOR_RGB2HSV_FULL
COLOR_BGR2HLS_FULL 
Python: cv.COLOR_BGR2HLS_FULL
COLOR_RGB2HLS_FULL 
Python: cv.COLOR_RGB2HLS_FULL
COLOR_HSV2BGR_FULL 
Python: cv.COLOR_HSV2BGR_FULL
COLOR_HSV2RGB_FULL 
Python: cv.COLOR_HSV2RGB_FULL
COLOR_HLS2BGR_FULL 
Python: cv.COLOR_HLS2BGR_FULL
COLOR_HLS2RGB_FULL 
Python: cv.COLOR_HLS2RGB_FULL
COLOR_LBGR2Lab 
Python: cv.COLOR_LBGR2Lab
COLOR_LRGB2Lab 
Python: cv.COLOR_LRGB2Lab
COLOR_LBGR2Luv 
Python: cv.COLOR_LBGR2Luv
COLOR_LRGB2Luv 
Python: cv.COLOR_LRGB2Luv
COLOR_Lab2LBGR 
Python: cv.COLOR_Lab2LBGR
COLOR_Lab2LRGB 
Python: cv.COLOR_Lab2LRGB
COLOR_Luv2LBGR 
Python: cv.COLOR_Luv2LBGR
COLOR_Luv2LRGB 
Python: cv.COLOR_Luv2LRGB
COLOR_BGR2YUV 
Python: cv.COLOR_BGR2YUV

convert between RGB/BGR and YUV

COLOR_RGB2YUV 
Python: cv.COLOR_RGB2YUV
COLOR_YUV2BGR 
Python: cv.COLOR_YUV2BGR
COLOR_YUV2RGB 
Python: cv.COLOR_YUV2RGB
COLOR_YUV2RGB_NV12 
Python: cv.COLOR_YUV2RGB_NV12

YUV 4:2:0 family to RGB.

COLOR_YUV2BGR_NV12 
Python: cv.COLOR_YUV2BGR_NV12
COLOR_YUV2RGB_NV21 
Python: cv.COLOR_YUV2RGB_NV21
COLOR_YUV2BGR_NV21 
Python: cv.COLOR_YUV2BGR_NV21
COLOR_YUV420sp2RGB 
Python: cv.COLOR_YUV420sp2RGB
COLOR_YUV420sp2BGR 
Python: cv.COLOR_YUV420sp2BGR
COLOR_YUV2RGBA_NV12 
Python: cv.COLOR_YUV2RGBA_NV12
COLOR_YUV2BGRA_NV12 
Python: cv.COLOR_YUV2BGRA_NV12
COLOR_YUV2RGBA_NV21 
Python: cv.COLOR_YUV2RGBA_NV21
COLOR_YUV2BGRA_NV21 
Python: cv.COLOR_YUV2BGRA_NV21
COLOR_YUV420sp2RGBA 
Python: cv.COLOR_YUV420sp2RGBA
COLOR_YUV420sp2BGRA 
Python: cv.COLOR_YUV420sp2BGRA
COLOR_YUV2RGB_YV12 
Python: cv.COLOR_YUV2RGB_YV12
COLOR_YUV2BGR_YV12 
Python: cv.COLOR_YUV2BGR_YV12
COLOR_YUV2RGB_IYUV 
Python: cv.COLOR_YUV2RGB_IYUV
COLOR_YUV2BGR_IYUV 
Python: cv.COLOR_YUV2BGR_IYUV
COLOR_YUV2RGB_I420 
Python: cv.COLOR_YUV2RGB_I420
COLOR_YUV2BGR_I420 
Python: cv.COLOR_YUV2BGR_I420
COLOR_YUV420p2RGB 
Python: cv.COLOR_YUV420p2RGB
COLOR_YUV420p2BGR 
Python: cv.COLOR_YUV420p2BGR
COLOR_YUV2RGBA_YV12 
Python: cv.COLOR_YUV2RGBA_YV12
COLOR_YUV2BGRA_YV12 
Python: cv.COLOR_YUV2BGRA_YV12
COLOR_YUV2RGBA_IYUV 
Python: cv.COLOR_YUV2RGBA_IYUV
COLOR_YUV2BGRA_IYUV 
Python: cv.COLOR_YUV2BGRA_IYUV
COLOR_YUV2RGBA_I420 
Python: cv.COLOR_YUV2RGBA_I420
COLOR_YUV2BGRA_I420 
Python: cv.COLOR_YUV2BGRA_I420
COLOR_YUV420p2RGBA 
Python: cv.COLOR_YUV420p2RGBA
COLOR_YUV420p2BGRA 
Python: cv.COLOR_YUV420p2BGRA
COLOR_YUV2GRAY_420 
Python: cv.COLOR_YUV2GRAY_420
COLOR_YUV2GRAY_NV21 
Python: cv.COLOR_YUV2GRAY_NV21
COLOR_YUV2GRAY_NV12 
Python: cv.COLOR_YUV2GRAY_NV12
COLOR_YUV2GRAY_YV12 
Python: cv.COLOR_YUV2GRAY_YV12
COLOR_YUV2GRAY_IYUV 
Python: cv.COLOR_YUV2GRAY_IYUV
COLOR_YUV2GRAY_I420 
Python: cv.COLOR_YUV2GRAY_I420
COLOR_YUV420sp2GRAY 
Python: cv.COLOR_YUV420sp2GRAY
COLOR_YUV420p2GRAY 
Python: cv.COLOR_YUV420p2GRAY
COLOR_YUV2RGB_UYVY 
Python: cv.COLOR_YUV2RGB_UYVY

YUV 4:2:2 family to RGB.

COLOR_YUV2BGR_UYVY 
Python: cv.COLOR_YUV2BGR_UYVY
COLOR_YUV2RGB_Y422 
Python: cv.COLOR_YUV2RGB_Y422
COLOR_YUV2BGR_Y422 
Python: cv.COLOR_YUV2BGR_Y422
COLOR_YUV2RGB_UYNV 
Python: cv.COLOR_YUV2RGB_UYNV
COLOR_YUV2BGR_UYNV 
Python: cv.COLOR_YUV2BGR_UYNV
COLOR_YUV2RGBA_UYVY 
Python: cv.COLOR_YUV2RGBA_UYVY
COLOR_YUV2BGRA_UYVY 
Python: cv.COLOR_YUV2BGRA_UYVY
COLOR_YUV2RGBA_Y422 
Python: cv.COLOR_YUV2RGBA_Y422
COLOR_YUV2BGRA_Y422 
Python: cv.COLOR_YUV2BGRA_Y422
COLOR_YUV2RGBA_UYNV 
Python: cv.COLOR_YUV2RGBA_UYNV
COLOR_YUV2BGRA_UYNV 
Python: cv.COLOR_YUV2BGRA_UYNV
COLOR_YUV2RGB_YUY2 
Python: cv.COLOR_YUV2RGB_YUY2
COLOR_YUV2BGR_YUY2 
Python: cv.COLOR_YUV2BGR_YUY2
COLOR_YUV2RGB_YVYU 
Python: cv.COLOR_YUV2RGB_YVYU
COLOR_YUV2BGR_YVYU 
Python: cv.COLOR_YUV2BGR_YVYU
COLOR_YUV2RGB_YUYV 
Python: cv.COLOR_YUV2RGB_YUYV
COLOR_YUV2BGR_YUYV 
Python: cv.COLOR_YUV2BGR_YUYV
COLOR_YUV2RGB_YUNV 
Python: cv.COLOR_YUV2RGB_YUNV
COLOR_YUV2BGR_YUNV 
Python: cv.COLOR_YUV2BGR_YUNV
COLOR_YUV2RGBA_YUY2 
Python: cv.COLOR_YUV2RGBA_YUY2
COLOR_YUV2BGRA_YUY2 
Python: cv.COLOR_YUV2BGRA_YUY2
COLOR_YUV2RGBA_YVYU 
Python: cv.COLOR_YUV2RGBA_YVYU
COLOR_YUV2BGRA_YVYU 
Python: cv.COLOR_YUV2BGRA_YVYU
COLOR_YUV2RGBA_YUYV 
Python: cv.COLOR_YUV2RGBA_YUYV
COLOR_YUV2BGRA_YUYV 
Python: cv.COLOR_YUV2BGRA_YUYV
COLOR_YUV2RGBA_YUNV 
Python: cv.COLOR_YUV2RGBA_YUNV
COLOR_YUV2BGRA_YUNV 
Python: cv.COLOR_YUV2BGRA_YUNV
COLOR_YUV2GRAY_UYVY 
Python: cv.COLOR_YUV2GRAY_UYVY
COLOR_YUV2GRAY_YUY2 
Python: cv.COLOR_YUV2GRAY_YUY2
COLOR_YUV2GRAY_Y422 
Python: cv.COLOR_YUV2GRAY_Y422
COLOR_YUV2GRAY_UYNV 
Python: cv.COLOR_YUV2GRAY_UYNV
COLOR_YUV2GRAY_YVYU 
Python: cv.COLOR_YUV2GRAY_YVYU
COLOR_YUV2GRAY_YUYV 
Python: cv.COLOR_YUV2GRAY_YUYV
COLOR_YUV2GRAY_YUNV 
Python: cv.COLOR_YUV2GRAY_YUNV
COLOR_RGBA2mRGBA 
Python: cv.COLOR_RGBA2mRGBA

alpha premultiplication

COLOR_mRGBA2RGBA 
Python: cv.COLOR_mRGBA2RGBA
COLOR_RGB2YUV_I420 
Python: cv.COLOR_RGB2YUV_I420

RGB to YUV 4:2:0 family.

COLOR_BGR2YUV_I420 
Python: cv.COLOR_BGR2YUV_I420
COLOR_RGB2YUV_IYUV 
Python: cv.COLOR_RGB2YUV_IYUV
COLOR_BGR2YUV_IYUV 
Python: cv.COLOR_BGR2YUV_IYUV
COLOR_RGBA2YUV_I420 
Python: cv.COLOR_RGBA2YUV_I420
COLOR_BGRA2YUV_I420 
Python: cv.COLOR_BGRA2YUV_I420
COLOR_RGBA2YUV_IYUV 
Python: cv.COLOR_RGBA2YUV_IYUV
COLOR_BGRA2YUV_IYUV 
Python: cv.COLOR_BGRA2YUV_IYUV
COLOR_RGB2YUV_YV12 
Python: cv.COLOR_RGB2YUV_YV12
COLOR_BGR2YUV_YV12 
Python: cv.COLOR_BGR2YUV_YV12
COLOR_RGBA2YUV_YV12 
Python: cv.COLOR_RGBA2YUV_YV12
COLOR_BGRA2YUV_YV12 
Python: cv.COLOR_BGRA2YUV_YV12
COLOR_BayerBG2BGR 
Python: cv.COLOR_BayerBG2BGR

Demosaicing.

COLOR_BayerGB2BGR 
Python: cv.COLOR_BayerGB2BGR
COLOR_BayerRG2BGR 
Python: cv.COLOR_BayerRG2BGR
COLOR_BayerGR2BGR 
Python: cv.COLOR_BayerGR2BGR
COLOR_BayerBG2RGB 
Python: cv.COLOR_BayerBG2RGB
COLOR_BayerGB2RGB 
Python: cv.COLOR_BayerGB2RGB
COLOR_BayerRG2RGB 
Python: cv.COLOR_BayerRG2RGB
COLOR_BayerGR2RGB 
Python: cv.COLOR_BayerGR2RGB
COLOR_BayerBG2GRAY 
Python: cv.COLOR_BayerBG2GRAY
COLOR_BayerGB2GRAY 
Python: cv.COLOR_BayerGB2GRAY
COLOR_BayerRG2GRAY 
Python: cv.COLOR_BayerRG2GRAY
COLOR_BayerGR2GRAY 
Python: cv.COLOR_BayerGR2GRAY
COLOR_BayerBG2BGR_VNG 
Python: cv.COLOR_BayerBG2BGR_VNG

Demosaicing using Variable Number of Gradients.

COLOR_BayerGB2BGR_VNG 
Python: cv.COLOR_BayerGB2BGR_VNG
COLOR_BayerRG2BGR_VNG 
Python: cv.COLOR_BayerRG2BGR_VNG
COLOR_BayerGR2BGR_VNG 
Python: cv.COLOR_BayerGR2BGR_VNG
COLOR_BayerBG2RGB_VNG 
Python: cv.COLOR_BayerBG2RGB_VNG
COLOR_BayerGB2RGB_VNG 
Python: cv.COLOR_BayerGB2RGB_VNG
COLOR_BayerRG2RGB_VNG 
Python: cv.COLOR_BayerRG2RGB_VNG
COLOR_BayerGR2RGB_VNG 
Python: cv.COLOR_BayerGR2RGB_VNG
COLOR_BayerBG2BGR_EA 
Python: cv.COLOR_BayerBG2BGR_EA

Edge-Aware Demosaicing.

COLOR_BayerGB2BGR_EA 
Python: cv.COLOR_BayerGB2BGR_EA
COLOR_BayerRG2BGR_EA 
Python: cv.COLOR_BayerRG2BGR_EA
COLOR_BayerGR2BGR_EA 
Python: cv.COLOR_BayerGR2BGR_EA
COLOR_BayerBG2RGB_EA 
Python: cv.COLOR_BayerBG2RGB_EA
COLOR_BayerGB2RGB_EA 
Python: cv.COLOR_BayerGB2RGB_EA
COLOR_BayerRG2RGB_EA 
Python: cv.COLOR_BayerRG2RGB_EA
COLOR_BayerGR2RGB_EA 
Python: cv.COLOR_BayerGR2RGB_EA
COLOR_BayerBG2BGRA 
Python: cv.COLOR_BayerBG2BGRA

Demosaicing with alpha channel.

COLOR_BayerGB2BGRA 
Python: cv.COLOR_BayerGB2BGRA
COLOR_BayerRG2BGRA 
Python: cv.COLOR_BayerRG2BGRA
COLOR_BayerGR2BGRA 
Python: cv.COLOR_BayerGR2BGRA
COLOR_BayerBG2RGBA 
Python: cv.COLOR_BayerBG2RGBA
COLOR_BayerGB2RGBA 
Python: cv.COLOR_BayerGB2RGBA
COLOR_BayerRG2RGBA 
Python: cv.COLOR_BayerRG2RGBA
COLOR_BayerGR2RGBA 
Python: cv.COLOR_BayerGR2RGBA
COLOR_COLORCVT_MAX 
Python: cv.COLOR_COLORCVT_MAX

§ DistanceTransformLabelTypes

distanceTransform algorithm flags

Enumerator
DIST_LABEL_CCOMP 
Python: cv.DIST_LABEL_CCOMP

each connected component of zeros in src (as well as all the non-zero pixels closest to the connected component) will be assigned the same label

DIST_LABEL_PIXEL 
Python: cv.DIST_LABEL_PIXEL

each zero pixel (and all the non-zero pixels closest to it) gets its own label.

§ DistanceTransformMasks

Mask size for distance transform.

Enumerator
DIST_MASK_3 
Python: cv.DIST_MASK_3

mask=3

DIST_MASK_5 
Python: cv.DIST_MASK_5

mask=5

DIST_MASK_PRECISE 
Python: cv.DIST_MASK_PRECISE

§ DistanceTypes

Distance types for Distance Transform and M-estimators

See also
cv::distanceTransform, cv::fitLine
Enumerator
DIST_USER 
Python: cv.DIST_USER

User defined distance.

DIST_L1 
Python: cv.DIST_L1

distance = |x1-x2| + |y1-y2|

DIST_L2 
Python: cv.DIST_L2

the simple Euclidean distance

DIST_C 
Python: cv.DIST_C

distance = max(|x1-x2|,|y1-y2|)

DIST_L12 
Python: cv.DIST_L12

L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1)

DIST_FAIR 
Python: cv.DIST_FAIR

distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998

DIST_WELSCH 
Python: cv.DIST_WELSCH

distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846

DIST_HUBER 
Python: cv.DIST_HUBER

distance = |x|<c ? x^2/2 : c(|x|-c/2), c=1.345

§ FloodFillFlags

floodfill algorithm flags

Enumerator
FLOODFILL_FIXED_RANGE 
Python: cv.FLOODFILL_FIXED_RANGE

If set, the difference between the current pixel and seed pixel is considered. Otherwise, the difference between neighbor pixels is considered (that is, the range is floating).

FLOODFILL_MASK_ONLY 
Python: cv.FLOODFILL_MASK_ONLY

If set, the function does not change the image (newVal is ignored), and only fills the mask with the value specified in bits 8-16 of flags as described above. This option only makes sense in function variants that have the mask parameter.

§ GrabCutClasses

class of the pixel in GrabCut algorithm

Enumerator
GC_BGD 
Python: cv.GC_BGD

an obvious background pixel

GC_FGD 
Python: cv.GC_FGD

an obvious foreground (object) pixel

GC_PR_BGD 
Python: cv.GC_PR_BGD

a possible background pixel

GC_PR_FGD 
Python: cv.GC_PR_FGD

a possible foreground pixel

§ GrabCutModes

GrabCut algorithm flags.

Enumerator
GC_INIT_WITH_RECT 
Python: cv.GC_INIT_WITH_RECT

The function initializes the state and the mask using the provided rectangle. After that it runs iterCount iterations of the algorithm.

GC_INIT_WITH_MASK 
Python: cv.GC_INIT_WITH_MASK

The function initializes the state using the provided mask. Note that GC_INIT_WITH_RECT and GC_INIT_WITH_MASK can be combined. Then, all the pixels outside of the ROI are automatically initialized with GC_BGD.

GC_EVAL 
Python: cv.GC_EVAL

The value means that the algorithm should just resume.

§ ThresholdTypes

type of the threshold operation

(Figure: threshold types)
Enumerator
THRESH_BINARY 
Python: cv.THRESH_BINARY

\[\texttt{dst}(x,y) = \begin{cases} \texttt{maxval} & \text{if } \texttt{src}(x,y) > \texttt{thresh} \\ 0 & \text{otherwise} \end{cases}\]

THRESH_BINARY_INV 
Python: cv.THRESH_BINARY_INV

\[\texttt{dst}(x,y) = \begin{cases} 0 & \text{if } \texttt{src}(x,y) > \texttt{thresh} \\ \texttt{maxval} & \text{otherwise} \end{cases}\]

THRESH_TRUNC 
Python: cv.THRESH_TRUNC

\[\texttt{dst}(x,y) = \begin{cases} \texttt{thresh} & \text{if } \texttt{src}(x,y) > \texttt{thresh} \\ \texttt{src}(x,y) & \text{otherwise} \end{cases}\]

THRESH_TOZERO 
Python: cv.THRESH_TOZERO

\[\texttt{dst}(x,y) = \begin{cases} \texttt{src}(x,y) & \text{if } \texttt{src}(x,y) > \texttt{thresh} \\ 0 & \text{otherwise} \end{cases}\]

THRESH_TOZERO_INV 
Python: cv.THRESH_TOZERO_INV

\[\texttt{dst}(x,y) = \begin{cases} 0 & \text{if } \texttt{src}(x,y) > \texttt{thresh} \\ \texttt{src}(x,y) & \text{otherwise} \end{cases}\]

THRESH_MASK 
Python: cv.THRESH_MASK
THRESH_OTSU 
Python: cv.THRESH_OTSU

flag, use Otsu algorithm to choose the optimal threshold value

THRESH_TRIANGLE 
Python: cv.THRESH_TRIANGLE

flag, use Triangle algorithm to choose the optimal threshold value

§ UndistortTypes

cv::undistort mode

Enumerator
PROJ_SPHERICAL_ORTHO 
Python: cv.PROJ_SPHERICAL_ORTHO
PROJ_SPHERICAL_EQRECT 
Python: cv.PROJ_SPHERICAL_EQRECT

Function Documentation

§ adaptiveThreshold()

void cv::adaptiveThreshold ( InputArray  src,
OutputArray  dst,
double  maxValue,
int  adaptiveMethod,
int  thresholdType,
int  blockSize,
double  C 
)
Python:
dst=cv.adaptiveThreshold(src, maxValue, adaptiveMethod, thresholdType, blockSize, C[, dst])

Applies an adaptive threshold to an array.

The function transforms a grayscale image to a binary image according to the formulae:

  • THRESH_BINARY

    \[dst(x,y) = \begin{cases} \texttt{maxValue} & \text{if } src(x,y) > T(x,y) \\ 0 & \text{otherwise} \end{cases}\]

  • THRESH_BINARY_INV

    \[dst(x,y) = \begin{cases} 0 & \text{if } src(x,y) > T(x,y) \\ \texttt{maxValue} & \text{otherwise} \end{cases}\]

    where \(T(x,y)\) is a threshold calculated individually for each pixel (see adaptiveMethod parameter).

The function can process the image in-place.

Parameters
src: Source 8-bit single-channel image.
dst: Destination image of the same size and the same type as src.
maxValue: Non-zero value assigned to the pixels for which the condition is satisfied.
adaptiveMethod: Adaptive thresholding algorithm to use, see cv::AdaptiveThresholdTypes. The BORDER_REPLICATE | BORDER_ISOLATED border mode is used to process boundaries.
thresholdType: Thresholding type that must be either THRESH_BINARY or THRESH_BINARY_INV, see cv::ThresholdTypes.
blockSize: Size of a pixel neighborhood that is used to calculate a threshold value for the pixel: 3, 5, 7, and so on.
C: Constant subtracted from the mean or weighted mean (see the details below). Normally, it is positive but may be zero or negative as well.
See also
threshold, blur, GaussianBlur
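
As an illustration, here is a minimal C++ sketch of a typical call. The input file name and the chosen parameters (blockSize = 11, C = 2) are hypothetical, not values prescribed by the API:

#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    // Hypothetical input: an 8-bit single-channel image.
    cv::Mat src = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
    if (src.empty()) return 1;

    cv::Mat dst;
    // T(x,y) is the Gaussian-weighted mean of the 11x11 neighborhood minus C = 2.
    cv::adaptiveThreshold(src, dst, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C,
                          cv::THRESH_BINARY, 11, 2);
    cv::imwrite("binary.png", dst);
    return 0;
}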

§ cvtColor()

void cv::cvtColor ( InputArray  src,
OutputArray  dst,
int  code,
int  dstCn = 0 
)
Python:
dst=cv.cvtColor(src, code[, dst[, dstCn]])

Converts an image from one color space to another.

The function converts an input image from one color space to another. In case of a transformation to or from the RGB color space, the order of the channels should be specified explicitly (RGB or BGR). Note that the default color format in OpenCV is often referred to as RGB but it is actually BGR (the bytes are reversed). So the first byte in a standard (24-bit) color image will be an 8-bit Blue component, the second byte will be Green, and the third byte will be Red. The fourth, fifth, and sixth bytes would then be the second pixel (Blue, then Green, then Red), and so on.

The conventional ranges for R, G, and B channel values are:

  • 0 to 255 for CV_8U images
  • 0 to 65535 for CV_16U images
  • 0 to 1 for CV_32F images

In case of linear transformations, the range does not matter. But in case of a non-linear transformation, an input RGB image should be normalized to the proper value range to get the correct results, for example, for an RGB \(\rightarrow\) L*u*v* transformation. If you have a 32-bit floating-point image directly converted from an 8-bit image without any scaling, it will have the 0..255 value range instead of the 0..1 range assumed by the function. So, before calling cvtColor, you first need to scale the image down:

img *= 1./255;

If you use cvtColor with 8-bit images, the conversion will lose some information. For many applications, this will not be noticeable but it is recommended to use 32-bit images in applications that need the full range of colors or that convert an image before an operation and then convert back.

If conversion adds the alpha channel, its value will be set to the maximum of the corresponding channel range: 255 for CV_8U, 65535 for CV_16U, 1 for CV_32F.

Parameters
src: input image: 8-bit unsigned, 16-bit unsigned ( CV_16UC... ), or single-precision floating-point.
dst: output image of the same size and depth as src.
code: color space conversion code (see cv::ColorConversionCodes).
dstCn: number of channels in the destination image; if the parameter is 0, the number of the channels is derived automatically from src and code.
See also
Color conversions
Examples:
camshiftdemo.cpp, edge.cpp, facedetect.cpp, ffilldemo.cpp, hog.cpp, houghcircles.cpp, houghlines.cpp, lkdemo.cpp, Sobel_Demo.cpp, train_HOG.cpp, and watershed.cpp.
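
As an illustration, a minimal C++ sketch covering both a linear conversion and the scaling step described above for a non-linear one; the input file name is hypothetical:

#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat bgr = cv::imread("input.png");  // imread loads color images as BGR
    if (bgr.empty()) return 1;

    // Linear conversion: BGR -> grayscale.
    cv::Mat gray;
    cv::cvtColor(bgr, gray, cv::COLOR_BGR2GRAY);

    // Non-linear conversion: scale a float image to [0, 1] first (see above),
    // then convert BGR -> CIE L*u*v*.
    cv::Mat f32, luv;
    bgr.convertTo(f32, CV_32F, 1.0 / 255.0);
    cv::cvtColor(f32, luv, cv::COLOR_BGR2Luv);
    return 0;
}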

§ distanceTransform() [1/2]

void cv::distanceTransform ( InputArray  src,
OutputArray  dst,
OutputArray  labels,
int  distanceType,
int  maskSize,
int  labelType = DIST_LABEL_CCOMP 
)
Python:
dst=cv.distanceTransform(src, distanceType, maskSize[, dst[, dstType]])
dst, labels=cv.distanceTransformWithLabels(src, distanceType, maskSize[, dst[, labels[, labelType]]])

Calculates the distance to the closest zero pixel for each pixel of the source image.

The function cv::distanceTransform calculates the approximate or precise distance from every binary image pixel to the nearest zero pixel. For zero image pixels, the distance will obviously be zero.

When maskSize == DIST_MASK_PRECISE and distanceType == DIST_L2, the function runs the algorithm described in [51]. This algorithm is parallelized with the TBB library.

In other cases, the algorithm [16] is used. This means that for a pixel the function finds the shortest path to the nearest zero pixel consisting of basic shifts: horizontal, vertical, diagonal, or knight's move (the latter is available for a \(5\times 5\) mask). The overall distance is calculated as a sum of these basic distances. Since the distance function should be symmetric, all of the horizontal and vertical shifts must have the same cost (denoted as a), all the diagonal shifts must have the same cost (denoted as b), and all knight's moves must have the same cost (denoted as c). For the cv::DIST_C and cv::DIST_L1 types, the distance is calculated precisely, whereas for cv::DIST_L2 (Euclidean distance) the distance can be calculated only with a relative error (a \(5\times 5\) mask gives more accurate results). For a, b, and c, OpenCV uses the values suggested in the original paper:

  • DIST_L1: a = 1, b = 2
  • DIST_L2:
    • 3 x 3: a=0.955, b=1.3693
    • 5 x 5: a=1, b=1.4, c=2.1969
  • DIST_C: a = 1, b = 1

Typically, for a fast, coarse DIST_L2 distance estimation, a \(3\times 3\) mask is used; for a more accurate estimation, a \(5\times 5\) mask or the precise algorithm is used. Note that both the precise and the approximate algorithms are linear in the number of pixels.

This variant of the function does not only compute the minimum distance for each pixel \((x, y)\) but also identifies the nearest connected component consisting of zero pixels (labelType==DIST_LABEL_CCOMP) or the nearest zero pixel (labelType==DIST_LABEL_PIXEL). The index of the component/pixel is stored in labels(x, y). When labelType==DIST_LABEL_CCOMP, the function automatically finds connected components of zero pixels in the input image and marks them with distinct labels. When labelType==DIST_LABEL_PIXEL, the function scans through the input image and marks all the zero pixels with distinct labels.

In this mode, the complexity is still linear. That is, the function provides a very fast way to compute the Voronoi diagram for a binary image. Currently, the second variant can use only the approximate distance transform algorithm, i.e. maskSize=DIST_MASK_PRECISE is not supported yet.

Parameters
src: 8-bit, single-channel (binary) source image.
dst: Output image with calculated distances. It is an 8-bit or 32-bit floating-point, single-channel image of the same size as src.
labels: Output 2D array of labels (the discrete Voronoi diagram). It has the type CV_32SC1 and the same size as src.
distanceType: Type of distance, see cv::DistanceTypes.
maskSize: Size of the distance transform mask, see cv::DistanceTransformMasks. DIST_MASK_PRECISE is not supported by this variant. In case of the DIST_L1 or DIST_C distance type, the parameter is forced to 3 because a \(3\times 3\) mask gives the same result as \(5\times 5\) or any larger aperture.
labelType: Type of the label array to build, see cv::DistanceTransformLabelTypes.
Examples:
distrans.cpp.
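
As an illustration, a minimal C++ sketch of both variants, assuming a binary 8-bit input image (the file name is hypothetical):

#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    // Hypothetical binary input: distances are measured to the zero pixels.
    cv::Mat bw = cv::imread("mask.png", cv::IMREAD_GRAYSCALE);
    if (bw.empty()) return 1;

    // Precise Euclidean distance (variant without labels).
    cv::Mat dist;
    cv::distanceTransform(bw, dist, cv::DIST_L2, cv::DIST_MASK_PRECISE);

    // Approximate distance plus the discrete Voronoi diagram; this variant
    // does not support DIST_MASK_PRECISE, so a 5x5 mask is used.
    cv::Mat dist2, labels;
    cv::distanceTransform(bw, dist2, labels, cv::DIST_L2, cv::DIST_MASK_5,
                          cv::DIST_LABEL_CCOMP);
    return 0;
}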

§ distanceTransform() [2/2]

void cv::distanceTransform ( InputArray  src,
OutputArray  dst,
int  distanceType,
int  maskSize,
int  dstType = CV_32F 
)
Python:
dst=cv.distanceTransform(src, distanceType, maskSize[, dst[, dstType]])
dst, labels=cv.distanceTransformWithLabels(src, distanceType, maskSize[, dst[, labels[, labelType]]])

This is an overloaded member function, provided for convenience. It differs from the above function only in what argument(s) it accepts.

Parameters
src: 8-bit, single-channel (binary) source image.
dst: Output image with calculated distances. It is an 8-bit or 32-bit floating-point, single-channel image of the same size as src.
distanceType: Type of distance, see cv::DistanceTypes.
maskSize: Size of the distance transform mask, see cv::DistanceTransformMasks. In case of the DIST_L1 or DIST_C distance type, the parameter is forced to 3 because a \(3\times 3\) mask gives the same result as \(5\times 5\) or any larger aperture.
dstType: Type of output image. It can be CV_8U or CV_32F. Type CV_8U can be used only for the first variant of the function and distanceType == DIST_L1.

§ floodFill() [1/2]

int cv::floodFill ( InputOutputArray  image,
Point  seedPoint,
Scalar  newVal,
Rect *  rect = 0,
Scalar  loDiff = Scalar(),
Scalar  upDiff = Scalar(),
int  flags = 4 
)
Python:
retval, image, mask, rect=cv.floodFill(image, mask, seedPoint, newVal[, loDiff[, upDiff[, flags]]])

This is an overloaded member function, provided for convenience. It differs from the above function only in what argument(s) it accepts.

variant without mask parameter

Examples:
ffilldemo.cpp.

§ floodFill() [2/2]

int cv::floodFill ( InputOutputArray  image,
InputOutputArray  mask,
Point  seedPoint,
Scalar  newVal,
Rect *  rect = 0,
Scalar  loDiff = Scalar(),
Scalar  upDiff = Scalar(),
int  flags = 4 
)
Python:
retval, image, mask, rect=cv.floodFill(image, mask, seedPoint, newVal[, loDiff[, upDiff[, flags]]])

Fills a connected component with the given color.

The function cv::floodFill fills a connected component starting from the seed point with the specified color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The pixel at \((x,y)\) is considered to belong to the repainted domain if:

  • in case of a grayscale image and floating range

    \[\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} (x',y')+ \texttt{upDiff}\]

  • in case of a grayscale image and fixed range

    \[\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}\]

  • in case of a color image and floating range

    \[\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,\]

    \[\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g\]

    and

    \[\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b\]

  • in case of a color image and fixed range

    \[\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,\]

    \[\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g\]

    and

    \[\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b\]

where \(src(x',y')\) is the value of one of the pixel neighbors that is already known to belong to the component. That is, to be added to the connected component, a color/brightness of the pixel should be close enough to:

  • Color/brightness of one of its neighbors that already belong to the connected component in case of a floating range.
  • Color/brightness of the seed point in case of a fixed range.

Use these functions to either mark a connected component with the specified color in-place, or build a mask and then extract the contour, or copy the region to another image, and so on.

Parameters
image: Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the function unless the FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See the details below.
mask: Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels taller than image. Since this is both an input and output parameter, you must take responsibility for initializing it. Flood-filling cannot go across non-zero pixels in the input mask. For example, an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the mask corresponding to filled pixels in the image are set to 1 or to the value specified in flags as described below. Additionally, the function fills the border of the mask with ones to simplify internal processing. It is therefore possible to use the same mask in multiple calls to the function to make sure the filled areas do not overlap.
seedPoint: Starting point.
newVal: New value of the repainted domain pixels.
loDiff: Maximal lower brightness/color difference between the currently observed pixel and one of its neighbors belonging to the component, or a seed pixel being added to the component.
upDiff: Maximal upper brightness/color difference between the currently observed pixel and one of its neighbors belonging to the component, or a seed pixel being added to the component.
rect: Optional output parameter set by the function to the minimum bounding rectangle of the repainted domain.
flags: Operation flags. The first 8 bits contain a connectivity value. The default value of 4 means that only the four nearest neighbor pixels (those that share an edge) are considered. A connectivity value of 8 means that the eight nearest neighbor pixels (those that share a corner) will be considered. The next 8 bits (8-16) contain a value between 1 and 255 with which to fill the mask (the default value is 1). For example, 4 | ( 255 << 8 ) will consider 4 nearest neighbours and fill the mask with a value of 255. The following additional options occupy higher bits and therefore may be further combined with the connectivity and mask fill values using bit-wise or (|), see cv::FloodFillFlags.
Note
Since the mask is larger than the filled image, a pixel \((x, y)\) in image corresponds to the pixel \((x+1, y+1)\) in the mask.
See also
findContours
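
Putting the flag layout together, here is a minimal C++ sketch of the masked variant; the seed point, tolerances, and file name are hypothetical:

#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat img = cv::imread("input.png");
    if (img.empty()) return 1;

    // The mask must be 2 pixels wider and taller than the image, zero-initialized.
    cv::Mat mask = cv::Mat::zeros(img.rows + 2, img.cols + 2, CV_8UC1);

    // 8-connectivity, fill the mask with 255, compare against the seed pixel,
    // and update only the mask, leaving the image untouched.
    int flags = 8 | (255 << 8) | cv::FLOODFILL_FIXED_RANGE | cv::FLOODFILL_MASK_ONLY;

    cv::Rect box;
    cv::floodFill(img, mask, cv::Point(50, 50), cv::Scalar(0, 0, 255), &box,
                  cv::Scalar(20, 20, 20), cv::Scalar(20, 20, 20), flags);
    return 0;
}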

§ grabCut()

void cv::grabCut ( InputArray  img,
InputOutputArray  mask,
Rect  rect,
InputOutputArray  bgdModel,
InputOutputArray  fgdModel,
int  iterCount,
int  mode = GC_EVAL 
)
Python:
mask, bgdModel, fgdModel=cv.grabCut(img, mask, rect, bgdModel, fgdModel, iterCount[, mode])

Runs the GrabCut algorithm.

The function implements the GrabCut image segmentation algorithm.

Parameters
img: Input 8-bit 3-channel image.
mask: Input/output 8-bit single-channel mask. The mask is initialized by the function when mode is set to GC_INIT_WITH_RECT. Its elements may have one of the cv::GrabCutClasses values.
rect: ROI containing a segmented object. The pixels outside of the ROI are marked as "obvious background". The parameter is only used when mode==GC_INIT_WITH_RECT.
bgdModel: Temporary array for the background model. Do not modify it while you are processing the same image.
fgdModel: Temporary array for the foreground model. Do not modify it while you are processing the same image.
iterCount: Number of iterations the algorithm should make before returning the result. Note that the result can be refined with further calls with mode==GC_INIT_WITH_MASK or mode==GC_EVAL.
mode: Operation mode that could be one of the cv::GrabCutModes.
Examples:
grabcut.cpp.
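
As an illustration, a minimal C++ sketch of rectangle-initialized segmentation; the ROI coordinates and the file name are hypothetical:

#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat img = cv::imread("input.png");  // 8-bit 3-channel
    if (img.empty()) return 1;

    // Hypothetical rough bounding box around the object of interest.
    cv::Rect rect(50, 50, 200, 150);

    cv::Mat mask, bgdModel, fgdModel;  // allocated/initialized by the function
    cv::grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv::GC_INIT_WITH_RECT);

    // Keep the pixels classified as obvious or probable foreground.
    cv::Mat fg = (mask == cv::GC_FGD) | (mask == cv::GC_PR_FGD);
    cv::Mat object;
    img.copyTo(object, fg);
    cv::imwrite("object.png", object);
    return 0;
}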

§ integral() [1/3]

void cv::integral ( InputArray  src,
OutputArray  sum,
int  sdepth = -1 
)
Python:
sum=cv.integral(src[, sum[, sdepth]])
sum, sqsum=cv.integral2(src[, sum[, sqsum[, sdepth[, sqdepth]]]])
sum, sqsum, tilted=cv.integral3(src[, sum[, sqsum[, tilted[, sdepth[, sqdepth]]]]])

This is an overloaded member function, provided for convenience. It differs from the above function only in what argument(s) it accepts.

§ integral() [2/3]

void cv::integral ( InputArray  src,
OutputArray  sum,
OutputArray  sqsum,
int  sdepth = -1,
int  sqdepth = -1 
)
Python:
sum=cv.integral(src[, sum[, sdepth]])
sum, sqsum=cv.integral2(src[, sum[, sqsum[, sdepth[, sqdepth]]]])
sum, sqsum, tilted=cv.integral3(src[, sum[, sqsum[, tilted[, sdepth[, sqdepth]]]]])

This is an overloaded member function, provided for convenience. It differs from the above function only in what argument(s) it accepts.

§ integral() [3/3]

void cv::integral ( InputArray  src,
OutputArray  sum,
OutputArray  sqsum,
OutputArray  tilted,
int  sdepth = -1,
int  sqdepth = -1 
)
Python:
sum=cv.integral(src[, sum[, sdepth]])
sum, sqsum=cv.integral2(src[, sum[, sqsum[, sdepth[, sqdepth]]]])
sum, sqsum, tilted=cv.integral3(src[, sum[, sqsum[, tilted[, sdepth[, sqdepth]]]]])

Calculates the integral of an image.

The function calculates one or more integral images for the source image as follows:

\[\texttt{sum} (X,Y) = \sum _{x<X,y<Y} \texttt{image} (x,y)\]

\[\texttt{sqsum} (X,Y) = \sum _{x<X,y<Y} \texttt{image} (x,y)^2\]

\[\texttt{tilted} (X,Y) = \sum _{y<Y,abs(x-X+1) \leq Y-y-1} \texttt{image} (x,y)\]

Using these integral images, you can calculate sum, mean, and standard deviation over a specific up-right or rotated rectangular region of the image in a constant time, for example:

\[\sum _{x_1 \leq x < x_2, \, y_1 \leq y < y_2} \texttt{image} (x,y) = \texttt{sum} (x_2,y_2)- \texttt{sum} (x_1,y_2)- \texttt{sum} (x_2,y_1)+ \texttt{sum} (x_1,y_1)\]

This makes it possible, for example, to do fast blurring or fast block correlation with a variable window size. In case of multi-channel images, sums for each channel are accumulated independently.

As a practical example, the next figure shows the calculation of the integral of a straight rectangle Rect(3,3,3,2) and of a tilted rectangle Rect(5,1,2,3). The selected pixels in the original image are shown, as well as the corresponding pixels in the integral images sum and tilted.

(Figure: integral calculation example)
Parameters
src: input image as \(W \times H\), 8-bit or floating-point (32f or 64f).
sum: integral image as \((W+1)\times (H+1)\), 32-bit integer or floating-point (32f or 64f).
sqsum: integral image for squared pixel values; it is a \((W+1)\times (H+1)\), double-precision floating-point (64f) array.
tilted: integral for the image rotated by 45 degrees; it is a \((W+1)\times (H+1)\) array with the same data type as sum.
sdepth: desired depth of the integral and the tilted integral images, CV_32S, CV_32F, or CV_64F.
sqdepth: desired depth of the integral image of squared pixel values, CV_32F or CV_64F.
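
As an illustration, a minimal C++ sketch that computes sum and evaluates the block-sum formula above for the straight rectangle Rect(3,3,3,2); the input file name is hypothetical:

#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>

int main()
{
    cv::Mat img = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
    if (img.empty()) return 1;

    cv::Mat sum;                     // (H+1) x (W+1), CV_32S for an 8-bit input
    cv::integral(img, sum, CV_32S);

    // Sum over the half-open rectangle [x1, x2) x [y1, y2), here Rect(3,3,3,2),
    // using four lookups in constant time.
    int x1 = 3, y1 = 3, x2 = 6, y2 = 5;
    int blockSum = sum.at<int>(y2, x2) - sum.at<int>(y1, x2)
                 - sum.at<int>(y2, x1) + sum.at<int>(y1, x1);
    std::cout << "block sum = " << blockSum << std::endl;
    return 0;
}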

§ threshold()

double cv::threshold ( InputArray  src,
OutputArray  dst,
double  thresh,
double  maxval,
int  type 
)
Python:
retval, dst=cv.threshold(src, thresh, maxval, type[, dst])

Applies a fixed-level threshold to each array element.

The function applies fixed-level thresholding to a multiple-channel array. The function is typically used to get a bi-level (binary) image out of a grayscale image ( cv::compare could be also used for this purpose) or for removing noise, that is, filtering out pixels with too small or too large values. There are several types of thresholding supported by the function; they are determined by the type parameter.

Also, the special values cv::THRESH_OTSU or cv::THRESH_TRIANGLE may be combined with one of the above values. In these cases, the function determines the optimal threshold value using Otsu's or the Triangle algorithm and uses it instead of the specified thresh. The function returns the computed threshold value. Currently, Otsu's and the Triangle methods are implemented only for 8-bit images.

Note
The input image should be single-channel only in case of the THRESH_OTSU or THRESH_TRIANGLE flags.
Parameters
src: input array (multiple-channel, 8-bit or 32-bit floating point).
dst: output array of the same size and type and the same number of channels as src.
thresh: threshold value.
maxval: maximum value to use with the THRESH_BINARY and THRESH_BINARY_INV thresholding types.
type: thresholding type (see cv::ThresholdTypes).
See also
adaptiveThreshold, findContours, compare, min, max
Examples:
ffilldemo.cpp.
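
As an illustration, a minimal C++ sketch of a fixed threshold and of combining THRESH_BINARY with THRESH_OTSU; the input file name is hypothetical:

#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>

int main()
{
    cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
    if (gray.empty()) return 1;

    // Fixed-level binarization at thresh = 127.
    cv::Mat bw;
    cv::threshold(gray, bw, 127, 255, cv::THRESH_BINARY);

    // Combine with THRESH_OTSU: the passed thresh (0) is ignored and the
    // computed optimal value is returned.
    cv::Mat bwOtsu;
    double t = cv::threshold(gray, bwOtsu, 0, 255,
                             cv::THRESH_BINARY | cv::THRESH_OTSU);
    std::cout << "Otsu threshold = " << t << std::endl;
    return 0;
}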

§ watershed()

void cv::watershed ( InputArray  image,
InputOutputArray  markers 
)
Python:
markers=cv.watershed(image, markers)

Performs a marker-based image segmentation using the watershed algorithm.

The function implements one of the variants of the watershed, non-parametric marker-based segmentation algorithm described in [125].

Before passing the image to the function, you have to roughly outline the desired regions in the image markers with positive (>0) indices. So, every region is represented as one or more connected components with the pixel values 1, 2, 3, and so on. Such markers can be retrieved from a binary mask using findContours and drawContours (see the watershed.cpp demo). The markers are "seeds" of the future image regions. All the other pixels in markers, whose relation to the outlined regions is not known and should be defined by the algorithm, should be set to 0's. In the function output, each pixel in markers is set to a value of the "seed" components or to -1 at boundaries between the regions.

Note
Any two neighboring connected components are not necessarily separated by a watershed boundary (pixels with value -1); for example, they can touch each other in the initial marker image passed to the function.
Parameters
image: Input 8-bit 3-channel image.
markers: Input/output 32-bit single-channel image (map) of markers. It should have the same size as image.
See also
findContours
Examples:
watershed.cpp.
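
As an illustration, a minimal C++ sketch; the two circular seed markers are hypothetical stand-ins for markers normally derived with findContours and drawContours, and the file name and coordinates are placeholders:

#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat img = cv::imread("input.png");  // 8-bit 3-channel
    if (img.empty()) return 1;

    // Seed map: positive labels mark known regions, 0 marks "unknown".
    cv::Mat markers = cv::Mat::zeros(img.size(), CV_32SC1);
    cv::circle(markers, cv::Point(60, 60), 10, cv::Scalar(1), -1);
    cv::circle(markers, cv::Point(200, 150), 10, cv::Scalar(2), -1);

    cv::watershed(img, markers);
    // markers now contains the seed labels per region and -1 on boundaries.
    return 0;
}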