OpenCV 2.4.7

org.opencv.core
Class Core

java.lang.Object
  extended by org.opencv.core.Core

public class Core
extends java.lang.Object


Nested Class Summary
static class Core.MinMaxLocResult
           
 
Field Summary
static int CMP_EQ
           
static int CMP_GE
           
static int CMP_GT
           
static int CMP_LE
           
static int CMP_LT
           
static int CMP_NE
           
static int COVAR_COLS
           
static int COVAR_NORMAL
           
static int COVAR_ROWS
           
static int COVAR_SCALE
           
static int COVAR_SCRAMBLED
           
static int COVAR_USE_AVG
           
static int DCT_INVERSE
           
static int DCT_ROWS
           
static int DECOMP_CHOLESKY
           
static int DECOMP_EIG
           
static int DECOMP_LU
           
static int DECOMP_NORMAL
           
static int DECOMP_QR
           
static int DECOMP_SVD
           
static int DEPTH_MASK
           
static int DEPTH_MASK_16S
           
static int DEPTH_MASK_16U
           
static int DEPTH_MASK_32F
           
static int DEPTH_MASK_32S
           
static int DEPTH_MASK_64F
           
static int DEPTH_MASK_8S
           
static int DEPTH_MASK_8U
           
static int DEPTH_MASK_ALL
           
static int DEPTH_MASK_ALL_BUT_8S
           
static int DEPTH_MASK_FLT
           
static int DFT_COMPLEX_OUTPUT
           
static int DFT_INVERSE
           
static int DFT_REAL_OUTPUT
           
static int DFT_ROWS
           
static int DFT_SCALE
           
static int FILLED
           
static int FONT_HERSHEY_COMPLEX
           
static int FONT_HERSHEY_COMPLEX_SMALL
           
static int FONT_HERSHEY_DUPLEX
           
static int FONT_HERSHEY_PLAIN
           
static int FONT_HERSHEY_SCRIPT_COMPLEX
           
static int FONT_HERSHEY_SCRIPT_SIMPLEX
           
static int FONT_HERSHEY_SIMPLEX
           
static int FONT_HERSHEY_TRIPLEX
           
static int FONT_ITALIC
           
static int GEMM_1_T
           
static int GEMM_2_T
           
static int GEMM_3_T
           
static int KMEANS_PP_CENTERS
           
static int KMEANS_RANDOM_CENTERS
           
static int KMEANS_USE_INITIAL_LABELS
           
static int LINE_4
           
static int LINE_8
           
static int LINE_AA
           
static int MAGIC_MASK
           
static java.lang.String NATIVE_LIBRARY_NAME
           
static int NORM_HAMMING
           
static int NORM_HAMMING2
           
static int NORM_INF
           
static int NORM_L1
           
static int NORM_L2
           
static int NORM_L2SQR
           
static int NORM_MINMAX
           
static int NORM_RELATIVE
           
static int NORM_TYPE_MASK
           
static int REDUCE_AVG
           
static int REDUCE_MAX
           
static int REDUCE_MIN
           
static int REDUCE_SUM
           
static int SORT_ASCENDING
           
static int SORT_DESCENDING
           
static int SORT_EVERY_COLUMN
           
static int SORT_EVERY_ROW
           
static int SVD_FULL_UV
           
static int SVD_MODIFY_A
           
static int SVD_NO_UV
           
static int TYPE_MASK
           
static java.lang.String VERSION
           
static int VERSION_EPOCH
           
static int VERSION_MAJOR
           
static int VERSION_MINOR
           
static int VERSION_REVISION
           
 
Constructor Summary
Core()
           
 
Method Summary
static void absdiff(Mat src1, Mat src2, Mat dst)
          Calculates the per-element absolute difference between two arrays or between an array and a scalar.
static void absdiff(Mat src1, Scalar src2, Mat dst)
          Calculates the per-element absolute difference between two arrays or between an array and a scalar.
static void add(Mat src1, Mat src2, Mat dst)
          Calculates the per-element sum of two arrays or an array and a scalar.
static void add(Mat src1, Mat src2, Mat dst, Mat mask)
          Calculates the per-element sum of two arrays or an array and a scalar.
static void add(Mat src1, Mat src2, Mat dst, Mat mask, int dtype)
          Calculates the per-element sum of two arrays or an array and a scalar.
static void add(Mat src1, Scalar src2, Mat dst)
          Calculates the per-element sum of two arrays or an array and a scalar.
static void add(Mat src1, Scalar src2, Mat dst, Mat mask)
          Calculates the per-element sum of two arrays or an array and a scalar.
static void add(Mat src1, Scalar src2, Mat dst, Mat mask, int dtype)
          Calculates the per-element sum of two arrays or an array and a scalar.
static void addWeighted(Mat src1, double alpha, Mat src2, double beta, double gamma, Mat dst)
          Calculates the weighted sum of two arrays.
static void addWeighted(Mat src1, double alpha, Mat src2, double beta, double gamma, Mat dst, int dtype)
          Calculates the weighted sum of two arrays.
static void batchDistance(Mat src1, Mat src2, Mat dist, int dtype, Mat nidx)
           
static void batchDistance(Mat src1, Mat src2, Mat dist, int dtype, Mat nidx, int normType, int K)
           
static void batchDistance(Mat src1, Mat src2, Mat dist, int dtype, Mat nidx, int normType, int K, Mat mask, int update, boolean crosscheck)
           
static void bitwise_and(Mat src1, Mat src2, Mat dst)
          Calculates the per-element bit-wise conjunction of two arrays or an array and a scalar.
static void bitwise_and(Mat src1, Mat src2, Mat dst, Mat mask)
          Calculates the per-element bit-wise conjunction of two arrays or an array and a scalar.
static void bitwise_not(Mat src, Mat dst)
          Inverts every bit of an array.
static void bitwise_not(Mat src, Mat dst, Mat mask)
          Inverts every bit of an array.
static void bitwise_or(Mat src1, Mat src2, Mat dst)
          Calculates the per-element bit-wise disjunction of two arrays or an array and a scalar.
static void bitwise_or(Mat src1, Mat src2, Mat dst, Mat mask)
          Calculates the per-element bit-wise disjunction of two arrays or an array and a scalar.
static void bitwise_xor(Mat src1, Mat src2, Mat dst)
          Calculates the per-element bit-wise "exclusive or" operation on two arrays or an array and a scalar.
static void bitwise_xor(Mat src1, Mat src2, Mat dst, Mat mask)
          Calculates the per-element bit-wise "exclusive or" operation on two arrays or an array and a scalar.
static void calcCovarMatrix(Mat samples, Mat covar, Mat mean, int flags)
          Calculates the covariance matrix of a set of vectors.
static void calcCovarMatrix(Mat samples, Mat covar, Mat mean, int flags, int ctype)
          Calculates the covariance matrix of a set of vectors.
static void cartToPolar(Mat x, Mat y, Mat magnitude, Mat angle)
          Calculates the magnitude and angle of 2D vectors.
static void cartToPolar(Mat x, Mat y, Mat magnitude, Mat angle, boolean angleInDegrees)
          Calculates the magnitude and angle of 2D vectors.
static boolean checkRange(Mat a)
          Checks every element of an input array for invalid values.
static boolean checkRange(Mat a, boolean quiet, double minVal, double maxVal)
          Checks every element of an input array for invalid values.
static void circle(Mat img, Point center, int radius, Scalar color)
          Draws a circle.
static void circle(Mat img, Point center, int radius, Scalar color, int thickness)
          Draws a circle.
static void circle(Mat img, Point center, int radius, Scalar color, int thickness, int lineType, int shift)
          Draws a circle.
static boolean clipLine(Rect imgRect, Point pt1, Point pt2)
          Clips the line against the image rectangle.
static void compare(Mat src1, Mat src2, Mat dst, int cmpop)
          Performs the per-element comparison of two arrays or an array and a scalar value.
static void compare(Mat src1, Scalar src2, Mat dst, int cmpop)
          Performs the per-element comparison of two arrays or an array and a scalar value.
static void completeSymm(Mat mtx)
          Copies the lower or the upper half of a square matrix to another half.
static void completeSymm(Mat mtx, boolean lowerToUpper)
          Copies the lower or the upper half of a square matrix to another half.
static void convertScaleAbs(Mat src, Mat dst)
          Scales, calculates absolute values, and converts the result to 8-bit.
static void convertScaleAbs(Mat src, Mat dst, double alpha, double beta)
          Scales, calculates absolute values, and converts the result to 8-bit.
static int countNonZero(Mat src)
          Counts non-zero array elements.
static float cubeRoot(float val)
          Computes the cube root of an argument.
static void dct(Mat src, Mat dst)
          Performs a forward or inverse discrete Cosine transform of a 1D or 2D array.
static void dct(Mat src, Mat dst, int flags)
          Performs a forward or inverse discrete Cosine transform of a 1D or 2D array.
static double determinant(Mat mtx)
          Returns the determinant of a square floating-point matrix.
static void dft(Mat src, Mat dst)
          Performs a forward or inverse Discrete Fourier transform of a 1D or 2D floating-point array.
static void dft(Mat src, Mat dst, int flags, int nonzeroRows)
          Performs a forward or inverse Discrete Fourier transform of a 1D or 2D floating-point array.
static void divide(double scale, Mat src2, Mat dst)
          Performs per-element division of two arrays or a scalar by an array.
static void divide(double scale, Mat src2, Mat dst, int dtype)
          Performs per-element division of two arrays or a scalar by an array.
static void divide(Mat src1, Mat src2, Mat dst)
          Performs per-element division of two arrays or a scalar by an array.
static void divide(Mat src1, Mat src2, Mat dst, double scale)
          Performs per-element division of two arrays or a scalar by an array.
static void divide(Mat src1, Mat src2, Mat dst, double scale, int dtype)
          Performs per-element division of two arrays or a scalar by an array.
static void divide(Mat src1, Scalar src2, Mat dst)
          Performs per-element division of two arrays or a scalar by an array.
static void divide(Mat src1, Scalar src2, Mat dst, double scale)
          Performs per-element division of two arrays or a scalar by an array.
static void divide(Mat src1, Scalar src2, Mat dst, double scale, int dtype)
          Performs per-element division of two arrays or a scalar by an array.
static boolean eigen(Mat src, boolean computeEigenvectors, Mat eigenvalues, Mat eigenvectors)
          Calculates eigenvalues and eigenvectors of a symmetric matrix.
static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color)
          Draws a simple or thick elliptic arc or fills an ellipse sector.
static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness)
          Draws a simple or thick elliptic arc or fills an ellipse sector.
static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness, int lineType, int shift)
          Draws a simple or thick elliptic arc or fills an ellipse sector.
static void ellipse(Mat img, RotatedRect box, Scalar color)
          Draws a simple or thick elliptic arc or fills an ellipse sector.
static void ellipse(Mat img, RotatedRect box, Scalar color, int thickness)
          Draws a simple or thick elliptic arc or fills an ellipse sector.
static void ellipse(Mat img, RotatedRect box, Scalar color, int thickness, int lineType)
          Draws a simple or thick elliptic arc or fills an ellipse sector.
static void ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, MatOfPoint pts)
          Approximates an elliptic arc with a polyline.
static void exp(Mat src, Mat dst)
          Calculates the exponent of every array element.
static void extractChannel(Mat src, Mat dst, int coi)
           
static float fastAtan2(float y, float x)
          Calculates the angle of a 2D vector in degrees.
static void fillConvexPoly(Mat img, MatOfPoint points, Scalar color)
          Fills a convex polygon.
static void fillConvexPoly(Mat img, MatOfPoint points, Scalar color, int lineType, int shift)
          Fills a convex polygon.
static void fillPoly(Mat img, java.util.List<MatOfPoint> pts, Scalar color)
          Fills the area bounded by one or more polygons.
static void fillPoly(Mat img, java.util.List<MatOfPoint> pts, Scalar color, int lineType, int shift, Point offset)
          Fills the area bounded by one or more polygons.
static void findNonZero(Mat src, Mat idx)
           
static void flip(Mat src, Mat dst, int flipCode)
          Flips a 2D array around vertical, horizontal, or both axes.
static void gemm(Mat src1, Mat src2, double alpha, Mat src3, double gamma, Mat dst)
          Performs generalized matrix multiplication.
static void gemm(Mat src1, Mat src2, double alpha, Mat src3, double gamma, Mat dst, int flags)
          Performs generalized matrix multiplication.
static java.lang.String getBuildInformation()
          Returns the full configuration-time CMake output.
static long getCPUTickCount()
          Returns the number of CPU ticks.
static int getNumberOfCPUs()
          Returns the number of logical CPUs available for the process.
static int getOptimalDFTSize(int vecsize)
          Returns the optimal DFT size for a given vector size.
static Size getTextSize(java.lang.String text, int fontFace, double fontScale, int thickness, int[] baseLine)
          Calculates the width and height of a text string.
static long getTickCount()
          Returns the number of ticks.
static double getTickFrequency()
          Returns the number of ticks per second.
static void hconcat(java.util.List<Mat> src, Mat dst)
           
static void idct(Mat src, Mat dst)
          Calculates the inverse Discrete Cosine Transform of a 1D or 2D array.
static void idct(Mat src, Mat dst, int flags)
          Calculates the inverse Discrete Cosine Transform of a 1D or 2D array.
static void idft(Mat src, Mat dst)
          Calculates the inverse Discrete Fourier Transform of a 1D or 2D array.
static void idft(Mat src, Mat dst, int flags, int nonzeroRows)
          Calculates the inverse Discrete Fourier Transform of a 1D or 2D array.
static void inRange(Mat src, Scalar lowerb, Scalar upperb, Mat dst)
          Checks if array elements lie between the elements of two other arrays.
static void insertChannel(Mat src, Mat dst, int coi)
           
static double invert(Mat src, Mat dst)
          Finds the inverse or pseudo-inverse of a matrix.
static double invert(Mat src, Mat dst, int flags)
          Finds the inverse or pseudo-inverse of a matrix.
static double kmeans(Mat data, int K, Mat bestLabels, TermCriteria criteria, int attempts, int flags)
          Finds centers of clusters and groups input samples around the clusters.
static double kmeans(Mat data, int K, Mat bestLabels, TermCriteria criteria, int attempts, int flags, Mat centers)
          Finds centers of clusters and groups input samples around the clusters.
static void line(Mat img, Point pt1, Point pt2, Scalar color)
          Draws a line segment connecting two points.
static void line(Mat img, Point pt1, Point pt2, Scalar color, int thickness)
          Draws a line segment connecting two points.
static void line(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int lineType, int shift)
          Draws a line segment connecting two points.
static void log(Mat src, Mat dst)
          Calculates the natural logarithm of every array element.
static void LUT(Mat src, Mat lut, Mat dst)
          Performs a look-up table transform of an array.
static void LUT(Mat src, Mat lut, Mat dst, int interpolation)
          Performs a look-up table transform of an array.
static void magnitude(Mat x, Mat y, Mat magnitude)
          Calculates the magnitude of 2D vectors.
static double Mahalanobis(Mat v1, Mat v2, Mat icovar)
          Calculates the Mahalanobis distance between two vectors.
static void max(Mat src1, Mat src2, Mat dst)
          Calculates per-element maximum of two arrays or an array and a scalar.
static void max(Mat src1, Scalar src2, Mat dst)
          Calculates per-element maximum of two arrays or an array and a scalar.
static Scalar mean(Mat src)
          Calculates an average (mean) of array elements.
static Scalar mean(Mat src, Mat mask)
          Calculates an average (mean) of array elements.
static void meanStdDev(Mat src, MatOfDouble mean, MatOfDouble stddev)
          Calculates a mean and standard deviation of array elements.
static void meanStdDev(Mat src, MatOfDouble mean, MatOfDouble stddev, Mat mask)
          Calculates a mean and standard deviation of array elements.
static void merge(java.util.List<Mat> mv, Mat dst)
          Creates one multichannel array out of several single-channel ones.
static void min(Mat src1, Mat src2, Mat dst)
          Calculates per-element minimum of two arrays or an array and a scalar.
static void min(Mat src1, Scalar src2, Mat dst)
          Calculates per-element minimum of two arrays or an array and a scalar.
static Core.MinMaxLocResult minMaxLoc(Mat src)
          Finds the global minimum and maximum in an array.
static Core.MinMaxLocResult minMaxLoc(Mat src, Mat mask)
          Finds the global minimum and maximum in an array.
static void mixChannels(java.util.List<Mat> src, java.util.List<Mat> dst, MatOfInt fromTo)
          Copies specified channels from input arrays to the specified channels of output arrays.
static void mulSpectrums(Mat a, Mat b, Mat c, int flags)
          Performs the per-element multiplication of two Fourier spectrums.
static void mulSpectrums(Mat a, Mat b, Mat c, int flags, boolean conjB)
          Performs the per-element multiplication of two Fourier spectrums.
static void multiply(Mat src1, Mat src2, Mat dst)
          Calculates the per-element scaled product of two arrays.
static void multiply(Mat src1, Mat src2, Mat dst, double scale)
          Calculates the per-element scaled product of two arrays.
static void multiply(Mat src1, Mat src2, Mat dst, double scale, int dtype)
          Calculates the per-element scaled product of two arrays.
static void multiply(Mat src1, Scalar src2, Mat dst)
          Calculates the per-element scaled product of two arrays.
static void multiply(Mat src1, Scalar src2, Mat dst, double scale)
          Calculates the per-element scaled product of two arrays.
static void multiply(Mat src1, Scalar src2, Mat dst, double scale, int dtype)
          Calculates the per-element scaled product of two arrays.
static void mulTransposed(Mat src, Mat dst, boolean aTa)
          Calculates the product of a matrix and its transposition.
static void mulTransposed(Mat src, Mat dst, boolean aTa, Mat delta, double scale)
          Calculates the product of a matrix and its transposition.
static void mulTransposed(Mat src, Mat dst, boolean aTa, Mat delta, double scale, int dtype)
          Calculates the product of a matrix and its transposition.
static double norm(Mat src1)
          Calculates an absolute array norm, an absolute difference norm, or a relative difference norm.
static double norm(Mat src1, int normType)
          Calculates an absolute array norm, an absolute difference norm, or a relative difference norm.
static double norm(Mat src1, int normType, Mat mask)
          Calculates an absolute array norm, an absolute difference norm, or a relative difference norm.
static double norm(Mat src1, Mat src2)
          Calculates an absolute array norm, an absolute difference norm, or a relative difference norm.
static double norm(Mat src1, Mat src2, int normType)
          Calculates an absolute array norm, an absolute difference norm, or a relative difference norm.
static double norm(Mat src1, Mat src2, int normType, Mat mask)
          Calculates an absolute array norm, an absolute difference norm, or a relative difference norm.
static void normalize(Mat src, Mat dst)
          Normalizes the norm or value range of an array.
static void normalize(Mat src, Mat dst, double alpha, double beta, int norm_type)
          Normalizes the norm or value range of an array.
static void normalize(Mat src, Mat dst, double alpha, double beta, int norm_type, int dtype)
          Normalizes the norm or value range of an array.
static void normalize(Mat src, Mat dst, double alpha, double beta, int norm_type, int dtype, Mat mask)
          Normalizes the norm or value range of an array.
static void patchNaNs(Mat a)
           
static void patchNaNs(Mat a, double val)
           
static void PCABackProject(Mat data, Mat mean, Mat eigenvectors, Mat result)
           
static void PCACompute(Mat data, Mat mean, Mat eigenvectors)
           
static void PCACompute(Mat data, Mat mean, Mat eigenvectors, int maxComponents)
           
static void PCAComputeVar(Mat data, Mat mean, Mat eigenvectors, double retainedVariance)
           
static void PCAProject(Mat data, Mat mean, Mat eigenvectors, Mat result)
           
static void perspectiveTransform(Mat src, Mat dst, Mat m)
          Performs the perspective matrix transformation of vectors.
static void phase(Mat x, Mat y, Mat angle)
          Calculates the rotation angle of 2D vectors.
static void phase(Mat x, Mat y, Mat angle, boolean angleInDegrees)
          Calculates the rotation angle of 2D vectors.
static void polarToCart(Mat magnitude, Mat angle, Mat x, Mat y)
          Calculates x and y coordinates of 2D vectors from their magnitude and angle.
static void polarToCart(Mat magnitude, Mat angle, Mat x, Mat y, boolean angleInDegrees)
          Calculates x and y coordinates of 2D vectors from their magnitude and angle.
static void polylines(Mat img, java.util.List<MatOfPoint> pts, boolean isClosed, Scalar color)
          Draws several polygonal curves.
static void polylines(Mat img, java.util.List<MatOfPoint> pts, boolean isClosed, Scalar color, int thickness)
          Draws several polygonal curves.
static void polylines(Mat img, java.util.List<MatOfPoint> pts, boolean isClosed, Scalar color, int thickness, int lineType, int shift)
          Draws several polygonal curves.
static void pow(Mat src, double power, Mat dst)
          Raises every array element to a power.
static void putText(Mat img, java.lang.String text, Point org, int fontFace, double fontScale, Scalar color)
          Draws a text string.
static void putText(Mat img, java.lang.String text, Point org, int fontFace, double fontScale, Scalar color, int thickness)
          Draws a text string.
static void putText(Mat img, java.lang.String text, Point org, int fontFace, double fontScale, Scalar color, int thickness, int lineType, boolean bottomLeftOrigin)
          Draws a text string.
static void randn(Mat dst, double mean, double stddev)
          Fills the array with normally distributed random numbers.
static void randShuffle(Mat dst)
           
static void randShuffle(Mat dst, double iterFactor)
           
static void randu(Mat dst, double low, double high)
          Generates a single uniformly-distributed random number or an array of random numbers.
static void rectangle(Mat img, Point pt1, Point pt2, Scalar color)
          Draws a simple, thick, or filled up-right rectangle.
static void rectangle(Mat img, Point pt1, Point pt2, Scalar color, int thickness)
          Draws a simple, thick, or filled up-right rectangle.
static void rectangle(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int lineType, int shift)
          Draws a simple, thick, or filled up-right rectangle.
static void reduce(Mat src, Mat dst, int dim, int rtype)
          Reduces a matrix to a vector.
static void reduce(Mat src, Mat dst, int dim, int rtype, int dtype)
          Reduces a matrix to a vector.
static void repeat(Mat src, int ny, int nx, Mat dst)
          Fills the output array with repeated copies of the input array.
static void scaleAdd(Mat src1, double alpha, Mat src2, Mat dst)
          Calculates the sum of a scaled array and another array.
static void setErrorVerbosity(boolean verbose)
           
static void setIdentity(Mat mtx)
          Initializes a scaled identity matrix.
static void setIdentity(Mat mtx, Scalar s)
          Initializes a scaled identity matrix.
static boolean solve(Mat src1, Mat src2, Mat dst)
          Solves one or more linear systems or least-squares problems.
static boolean solve(Mat src1, Mat src2, Mat dst, int flags)
          Solves one or more linear systems or least-squares problems.
static int solveCubic(Mat coeffs, Mat roots)
          Finds the real roots of a cubic equation.
static double solvePoly(Mat coeffs, Mat roots)
          Finds the real or complex roots of a polynomial equation.
static double solvePoly(Mat coeffs, Mat roots, int maxIters)
          Finds the real or complex roots of a polynomial equation.
static void sort(Mat src, Mat dst, int flags)
          Sorts each row or each column of a matrix.
static void sortIdx(Mat src, Mat dst, int flags)
          Sorts each row or each column of a matrix.
static void split(Mat m, java.util.List<Mat> mv)
          Divides a multi-channel array into several single-channel arrays.
static void sqrt(Mat src, Mat dst)
          Calculates a square root of array elements.
static void subtract(Mat src1, Mat src2, Mat dst)
          Calculates the per-element difference between two arrays or an array and a scalar.
static void subtract(Mat src1, Mat src2, Mat dst, Mat mask)
          Calculates the per-element difference between two arrays or an array and a scalar.
static void subtract(Mat src1, Mat src2, Mat dst, Mat mask, int dtype)
          Calculates the per-element difference between two arrays or an array and a scalar.
static void subtract(Mat src1, Scalar src2, Mat dst)
          Calculates the per-element difference between two arrays or an array and a scalar.
static void subtract(Mat src1, Scalar src2, Mat dst, Mat mask)
          Calculates the per-element difference between two arrays or an array and a scalar.
static void subtract(Mat src1, Scalar src2, Mat dst, Mat mask, int dtype)
          Calculates the per-element difference between two arrays or an array and a scalar.
static Scalar sumElems(Mat src)
          Calculates the sum of array elements.
static void SVBackSubst(Mat w, Mat u, Mat vt, Mat rhs, Mat dst)
           
static void SVDecomp(Mat src, Mat w, Mat u, Mat vt)
           
static void SVDecomp(Mat src, Mat w, Mat u, Mat vt, int flags)
           
static Scalar trace(Mat mtx)
          Returns the trace of a matrix.
static void transform(Mat src, Mat dst, Mat m)
          Performs the matrix transformation of every array element.
static void transpose(Mat src, Mat dst)
          Transposes a matrix.
static void vconcat(java.util.List<Mat> src, Mat dst)
           
 
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
 

Field Detail

CMP_EQ

public static final int CMP_EQ
See Also:
Constant Field Values

CMP_GE

public static final int CMP_GE
See Also:
Constant Field Values

CMP_GT

public static final int CMP_GT
See Also:
Constant Field Values

CMP_LE

public static final int CMP_LE
See Also:
Constant Field Values

CMP_LT

public static final int CMP_LT
See Also:
Constant Field Values

CMP_NE

public static final int CMP_NE
See Also:
Constant Field Values

COVAR_COLS

public static final int COVAR_COLS
See Also:
Constant Field Values

COVAR_NORMAL

public static final int COVAR_NORMAL
See Also:
Constant Field Values

COVAR_ROWS

public static final int COVAR_ROWS
See Also:
Constant Field Values

COVAR_SCALE

public static final int COVAR_SCALE
See Also:
Constant Field Values

COVAR_SCRAMBLED

public static final int COVAR_SCRAMBLED
See Also:
Constant Field Values

COVAR_USE_AVG

public static final int COVAR_USE_AVG
See Also:
Constant Field Values

DCT_INVERSE

public static final int DCT_INVERSE
See Also:
Constant Field Values

DCT_ROWS

public static final int DCT_ROWS
See Also:
Constant Field Values

DECOMP_CHOLESKY

public static final int DECOMP_CHOLESKY
See Also:
Constant Field Values

DECOMP_EIG

public static final int DECOMP_EIG
See Also:
Constant Field Values

DECOMP_LU

public static final int DECOMP_LU
See Also:
Constant Field Values

DECOMP_NORMAL

public static final int DECOMP_NORMAL
See Also:
Constant Field Values

DECOMP_QR

public static final int DECOMP_QR
See Also:
Constant Field Values

DECOMP_SVD

public static final int DECOMP_SVD
See Also:
Constant Field Values

DEPTH_MASK

public static final int DEPTH_MASK
See Also:
Constant Field Values

DEPTH_MASK_16S

public static final int DEPTH_MASK_16S
See Also:
Constant Field Values

DEPTH_MASK_16U

public static final int DEPTH_MASK_16U
See Also:
Constant Field Values

DEPTH_MASK_32F

public static final int DEPTH_MASK_32F
See Also:
Constant Field Values

DEPTH_MASK_32S

public static final int DEPTH_MASK_32S
See Also:
Constant Field Values

DEPTH_MASK_64F

public static final int DEPTH_MASK_64F
See Also:
Constant Field Values

DEPTH_MASK_8S

public static final int DEPTH_MASK_8S
See Also:
Constant Field Values

DEPTH_MASK_8U

public static final int DEPTH_MASK_8U
See Also:
Constant Field Values

DEPTH_MASK_ALL

public static final int DEPTH_MASK_ALL
See Also:
Constant Field Values

DEPTH_MASK_ALL_BUT_8S

public static final int DEPTH_MASK_ALL_BUT_8S
See Also:
Constant Field Values

DEPTH_MASK_FLT

public static final int DEPTH_MASK_FLT
See Also:
Constant Field Values

DFT_COMPLEX_OUTPUT

public static final int DFT_COMPLEX_OUTPUT
See Also:
Constant Field Values

DFT_INVERSE

public static final int DFT_INVERSE
See Also:
Constant Field Values

DFT_REAL_OUTPUT

public static final int DFT_REAL_OUTPUT
See Also:
Constant Field Values

DFT_ROWS

public static final int DFT_ROWS
See Also:
Constant Field Values

DFT_SCALE

public static final int DFT_SCALE
See Also:
Constant Field Values

FILLED

public static final int FILLED
See Also:
Constant Field Values

FONT_HERSHEY_COMPLEX

public static final int FONT_HERSHEY_COMPLEX
See Also:
Constant Field Values

FONT_HERSHEY_COMPLEX_SMALL

public static final int FONT_HERSHEY_COMPLEX_SMALL
See Also:
Constant Field Values

FONT_HERSHEY_DUPLEX

public static final int FONT_HERSHEY_DUPLEX
See Also:
Constant Field Values

FONT_HERSHEY_PLAIN

public static final int FONT_HERSHEY_PLAIN
See Also:
Constant Field Values

FONT_HERSHEY_SCRIPT_COMPLEX

public static final int FONT_HERSHEY_SCRIPT_COMPLEX
See Also:
Constant Field Values

FONT_HERSHEY_SCRIPT_SIMPLEX

public static final int FONT_HERSHEY_SCRIPT_SIMPLEX
See Also:
Constant Field Values

FONT_HERSHEY_SIMPLEX

public static final int FONT_HERSHEY_SIMPLEX
See Also:
Constant Field Values

FONT_HERSHEY_TRIPLEX

public static final int FONT_HERSHEY_TRIPLEX
See Also:
Constant Field Values

FONT_ITALIC

public static final int FONT_ITALIC
See Also:
Constant Field Values

GEMM_1_T

public static final int GEMM_1_T
See Also:
Constant Field Values

GEMM_2_T

public static final int GEMM_2_T
See Also:
Constant Field Values

GEMM_3_T

public static final int GEMM_3_T
See Also:
Constant Field Values

KMEANS_PP_CENTERS

public static final int KMEANS_PP_CENTERS
See Also:
Constant Field Values

KMEANS_RANDOM_CENTERS

public static final int KMEANS_RANDOM_CENTERS
See Also:
Constant Field Values

KMEANS_USE_INITIAL_LABELS

public static final int KMEANS_USE_INITIAL_LABELS
See Also:
Constant Field Values

LINE_4

public static final int LINE_4
See Also:
Constant Field Values

LINE_8

public static final int LINE_8
See Also:
Constant Field Values

LINE_AA

public static final int LINE_AA
See Also:
Constant Field Values

MAGIC_MASK

public static final int MAGIC_MASK
See Also:
Constant Field Values

NATIVE_LIBRARY_NAME

public static final java.lang.String NATIVE_LIBRARY_NAME
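
The native library must be loaded once per process before any Core method is called. A minimal Java sketch, assuming the matching native library (e.g. opencv_java247 for this release) is available on java.library.path:

// Java code:

static {
    // Resolves to the platform- and version-specific OpenCV JNI library name.
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
}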

NORM_HAMMING

public static final int NORM_HAMMING
See Also:
Constant Field Values

NORM_HAMMING2

public static final int NORM_HAMMING2
See Also:
Constant Field Values

NORM_INF

public static final int NORM_INF
See Also:
Constant Field Values

NORM_L1

public static final int NORM_L1
See Also:
Constant Field Values

NORM_L2

public static final int NORM_L2
See Also:
Constant Field Values

NORM_L2SQR

public static final int NORM_L2SQR
See Also:
Constant Field Values

NORM_MINMAX

public static final int NORM_MINMAX
See Also:
Constant Field Values

NORM_RELATIVE

public static final int NORM_RELATIVE
See Also:
Constant Field Values

NORM_TYPE_MASK

public static final int NORM_TYPE_MASK
See Also:
Constant Field Values

REDUCE_AVG

public static final int REDUCE_AVG
See Also:
Constant Field Values

REDUCE_MAX

public static final int REDUCE_MAX
See Also:
Constant Field Values

REDUCE_MIN

public static final int REDUCE_MIN
See Also:
Constant Field Values

REDUCE_SUM

public static final int REDUCE_SUM
See Also:
Constant Field Values

SORT_ASCENDING

public static final int SORT_ASCENDING
See Also:
Constant Field Values

SORT_DESCENDING

public static final int SORT_DESCENDING
See Also:
Constant Field Values

SORT_EVERY_COLUMN

public static final int SORT_EVERY_COLUMN
See Also:
Constant Field Values

SORT_EVERY_ROW

public static final int SORT_EVERY_ROW
See Also:
Constant Field Values

SVD_FULL_UV

public static final int SVD_FULL_UV
See Also:
Constant Field Values

SVD_MODIFY_A

public static final int SVD_MODIFY_A
See Also:
Constant Field Values

SVD_NO_UV

public static final int SVD_NO_UV
See Also:
Constant Field Values

TYPE_MASK

public static final int TYPE_MASK
See Also:
Constant Field Values

VERSION

public static final java.lang.String VERSION

VERSION_EPOCH

public static final int VERSION_EPOCH

VERSION_MAJOR

public static final int VERSION_MAJOR

VERSION_MINOR

public static final int VERSION_MINOR

VERSION_REVISION

public static final int VERSION_REVISION
Constructor Detail

Core

public Core()
Method Detail

absdiff

public static void absdiff(Mat src1,
                           Mat src2,
                           Mat dst)

Calculates the per-element absolute difference between two arrays or between an array and a scalar.

The function absdiff calculates:

dst(I) = saturate(|src1(I) - src2(I)|)

dst(I) = saturate(|src1(I) - src2|)

dst(I) = saturate(|src1 - src2(I)|)

where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each channel is processed independently.

Note: Saturation is not applied when the arrays have the depth CV_32S. You may even get a negative value in the case of overflow.

Parameters:
src1 - first input array or a scalar.
src2 - second input array or a scalar.
dst - output array that has the same size and type as input arrays.
See Also:
org.opencv.core.Core.absdiff

absdiff

public static void absdiff(Mat src1,
                           Scalar src2,
                           Mat dst)

Calculates the per-element absolute difference between two arrays or between an array and a scalar.

The function absdiff calculates:

dst(I) = saturate(|src1(I) - src2(I)|)

dst(I) = saturate(|src1(I) - src2|)

dst(I) = saturate(|src1 - src2(I)|)

where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each channel is processed independently.

Note: Saturation is not applied when the arrays have the depth CV_32S. You may even get a negative value in the case of overflow.

Parameters:
src1 - first input array or a scalar.
src2 - second input array or a scalar.
dst - output array that has the same size and type as input arrays.
See Also:
org.opencv.core.Core.absdiff
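
A minimal Java usage sketch, assuming the OpenCV native library has already been loaded via System.loadLibrary(Core.NATIVE_LIBRARY_NAME):

// Java code:

Mat a = new Mat(2, 2, CvType.CV_8UC1, new Scalar(10));
Mat b = new Mat(2, 2, CvType.CV_8UC1, new Scalar(250));
Mat diff = new Mat();
Core.absdiff(a, b, diff); // every element becomes |10 - 250| = 240
System.out.println(diff.dump());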

add

public static void add(Mat src1,
                       Mat src2,
                       Mat dst)

Calculates the per-element sum of two arrays or an array and a scalar.

The function add calculates:

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each channel is processed independently. The first function in the list above can be replaced with matrix expressions:

// C++ code:

dst = src1 + src2;

dst += src1; // equivalent to add(dst, src1, dst);

The input arrays and the output array can all have the same or different depths. For example, you can add a 16-bit unsigned array to an 8-bit signed array and store the sum as a 32-bit floating-point array. The depth of the output array is determined by the dtype parameter. In the second and third cases above, as well as in the first case, when src1.depth() == src2.depth(), dtype can be set to the default -1. In this case, the output array will have the same depth as the input array, be it src1, src2 or both.

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result with an incorrect sign in case of overflow.

Parameters:
src1 - first input array or a scalar.
src2 - second input array or a scalar.
dst - output array that has the same size and number of channels as the input array(s); the depth is defined by dtype or src1/src2.
See Also:
org.opencv.core.Core.add, addWeighted(org.opencv.core.Mat, double, org.opencv.core.Mat, double, double, org.opencv.core.Mat, int), Mat.convertTo(org.opencv.core.Mat, int, double, double), scaleAdd(org.opencv.core.Mat, double, org.opencv.core.Mat, org.opencv.core.Mat), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int)

add

public static void add(Mat src1,
                       Mat src2,
                       Mat dst,
                       Mat mask)

Calculates the per-element sum of two arrays or an array and a scalar.

The function add calculates:

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each channel is processed independently. The first function in the list above can be replaced with matrix expressions:

// C++ code:

dst = src1 + src2;

dst += src1; // equivalent to add(dst, src1, dst);

The input arrays and the output array can all have the same or different depths. For example, you can add a 16-bit unsigned array to an 8-bit signed array and store the sum as a 32-bit floating-point array. The depth of the output array is determined by the dtype parameter. In the second and third cases above, as well as in the first case, when src1.depth() == src2.depth(), dtype can be set to the default -1. In this case, the output array will have the same depth as the input array, be it src1, src2 or both.

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result with an incorrect sign in case of overflow.

Parameters:
src1 - first input array or a scalar.
src2 - second input array or a scalar.
dst - output array that has the same size and number of channels as the input array(s); the depth is defined by dtype or src1/src2.
mask - optional operation mask; an 8-bit single-channel array that specifies elements of the output array to be changed.
See Also:
org.opencv.core.Core.add, addWeighted(org.opencv.core.Mat, double, org.opencv.core.Mat, double, double, org.opencv.core.Mat, int), Mat.convertTo(org.opencv.core.Mat, int, double, double), scaleAdd(org.opencv.core.Mat, double, org.opencv.core.Mat, org.opencv.core.Mat), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int)

add

public static void add(Mat src1,
                       Mat src2,
                       Mat dst,
                       Mat mask,
                       int dtype)

Calculates the per-element sum of two arrays or an array and a scalar.

The function add calculates:

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each channel is processed independently. The first function in the list above can be replaced with matrix expressions:

// C++ code:

dst = src1 + src2;

dst += src1; // equivalent to add(dst, src1, dst);

The input arrays and the output array can all have the same or different depths. For example, you can add a 16-bit unsigned array to an 8-bit signed array and store the sum as a 32-bit floating-point array. The depth of the output array is determined by the dtype parameter. In the second and third cases above, as well as in the first case, when src1.depth() == src2.depth(), dtype can be set to the default -1. In this case, the output array will have the same depth as the input array, be it src1, src2 or both.

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result with an incorrect sign in case of overflow.

Parameters:
src1 - first input array or a scalar.
src2 - second input array or a scalar.
dst - output array that has the same size and number of channels as the input array(s); the depth is defined by dtype or src1/src2.
mask - optional operation mask; an 8-bit single-channel array that specifies elements of the output array to be changed.
dtype - optional depth of the output array (see the discussion below).
See Also:
org.opencv.core.Core.add, addWeighted(org.opencv.core.Mat, double, org.opencv.core.Mat, double, double, org.opencv.core.Mat, int), Mat.convertTo(org.opencv.core.Mat, int, double, double), scaleAdd(org.opencv.core.Mat, double, org.opencv.core.Mat, org.opencv.core.Mat), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int)

add

public static void add(Mat src1,
                       Scalar src2,
                       Mat dst)

Calculates the per-element sum of two arrays or an array and a scalar.

The function add calculates:

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each channel is processed independently. The first function in the list above can be replaced with matrix expressions:

// C++ code:

dst = src1 + src2;

dst += src1; // equivalent to add(dst, src1, dst);

The input arrays and the output array can all have the same or different depths. For example, you can add a 16-bit unsigned array to an 8-bit signed array and store the sum as a 32-bit floating-point array. The depth of the output array is determined by the dtype parameter. In the second and third cases above, as well as in the first case, when src1.depth() == src2.depth(), dtype can be set to the default -1. In this case, the output array will have the same depth as the input array, be it src1, src2 or both.

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result with an incorrect sign in case of overflow.

Parameters:
src1 - first input array or a scalar.
src2 - second input array or a scalar.
dst - output array that has the same size and number of channels as the input array(s); the depth is defined by dtype or src1/src2.
See Also:
org.opencv.core.Core.add, addWeighted(org.opencv.core.Mat, double, org.opencv.core.Mat, double, double, org.opencv.core.Mat, int), Mat.convertTo(org.opencv.core.Mat, int, double, double), scaleAdd(org.opencv.core.Mat, double, org.opencv.core.Mat, org.opencv.core.Mat), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int)

add

public static void add(Mat src1,
                       Scalar src2,
                       Mat dst,
                       Mat mask)

Calculates the per-element sum of two arrays or an array and a scalar.

The function add calculates:

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each channel is processed independently. The first function in the list above can be replaced with matrix expressions:

// C++ code:

dst = src1 + src2;

dst += src1; // equivalent to add(dst, src1, dst);

The input arrays and the output array can all have the same or different depths. For example, you can add a 16-bit unsigned array to an 8-bit signed array and store the sum as a 32-bit floating-point array. The depth of the output array is determined by the dtype parameter. In the second and third cases above, as well as in the first case, when src1.depth() == src2.depth(), dtype can be set to the default -1. In this case, the output array will have the same depth as the input array, be it src1, src2 or both.

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result with an incorrect sign in case of overflow.

Parameters:
src1 - first input array or a scalar.
src2 - second input array or a scalar.
dst - output array that has the same size and number of channels as the input array(s); the depth is defined by dtype or src1/src2.
mask - optional operation mask; an 8-bit single-channel array that specifies elements of the output array to be changed.
See Also:
org.opencv.core.Core.add, addWeighted(org.opencv.core.Mat, double, org.opencv.core.Mat, double, double, org.opencv.core.Mat, int), Mat.convertTo(org.opencv.core.Mat, int, double, double), scaleAdd(org.opencv.core.Mat, double, org.opencv.core.Mat, org.opencv.core.Mat), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int)

add

public static void add(Mat src1,
                       Scalar src2,
                       Mat dst,
                       Mat mask,
                       int dtype)

Calculates the per-element sum of two arrays or an array and a scalar.

The function add calculates:

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each channel is processed independently. The first function in the list above can be replaced with matrix expressions:

// C++ code:

dst = src1 + src2;

dst += src1; // equivalent to add(dst, src1, dst);

The input arrays and the output array can all have the same or different depths. For example, you can add a 16-bit unsigned array to an 8-bit signed array and store the sum as a 32-bit floating-point array. The depth of the output array is determined by the dtype parameter. In the second and third cases above, as well as in the first case, when src1.depth() == src2.depth(), dtype can be set to the default -1. In this case, the output array will have the same depth as the input array, be it src1, src2 or both.

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result with an incorrect sign in case of overflow.

Parameters:
src1 - first input array or a scalar.
src2 - second input array or a scalar.
dst - output array that has the same size and number of channels as the input array(s); the depth is defined by dtype or src1/src2.
mask - optional operation mask; an 8-bit single-channel array that specifies elements of the output array to be changed.
dtype - optional depth of the output array (see the discussion below).
See Also:
org.opencv.core.Core.add, addWeighted(org.opencv.core.Mat, double, org.opencv.core.Mat, double, double, org.opencv.core.Mat, int), Mat.convertTo(org.opencv.core.Mat, int, double, double), scaleAdd(org.opencv.core.Mat, double, org.opencv.core.Mat, org.opencv.core.Mat), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int)
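
A minimal Java sketch of the masked overload with an explicit output depth, assuming the native library is already loaded:

// Java code:

Mat src1 = new Mat(3, 3, CvType.CV_8UC1, new Scalar(100));
Mat src2 = new Mat(3, 3, CvType.CV_8UC1, new Scalar(200));
Mat mask = Mat.zeros(3, 3, CvType.CV_8UC1);
mask.put(1, 1, 255); // enable only the center element
Mat sum = Mat.zeros(3, 3, CvType.CV_16SC1);
// 100 + 200 = 300 would saturate an 8-bit output but fits in the 16-bit signed result;
// elements outside the mask keep their initial zeros.
Core.add(src1, src2, sum, mask, CvType.CV_16S);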

addWeighted

public static void addWeighted(Mat src1,
                               double alpha,
                               Mat src2,
                               double beta,
                               double gamma,
                               Mat dst)

Calculates the weighted sum of two arrays.

The function addWeighted calculates the weighted sum of two arrays as follows:

dst(I) = saturate(src1(I)*alpha + src2(I)*beta + gamma)

where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each channel is processed independently. The function can be replaced with a matrix expression:

// C++ code:

dst = src1*alpha + src2*beta + gamma;

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result with an incorrect sign in case of overflow.

Parameters:
src1 - first input array.
alpha - weight of the first array elements.
src2 - second input array of the same size and channel number as src1.
beta - weight of the second array elements.
gamma - scalar added to each sum.
dst - output array that has the same size and number of channels as the input arrays.
See Also:
org.opencv.core.Core.addWeighted, add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), scaleAdd(org.opencv.core.Mat, double, org.opencv.core.Mat, org.opencv.core.Mat), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), Mat.convertTo(org.opencv.core.Mat, int, double, double)

addWeighted

public static void addWeighted(Mat src1,
                               double alpha,
                               Mat src2,
                               double beta,
                               double gamma,
                               Mat dst,
                               int dtype)

Calculates the weighted sum of two arrays.

The function addWeighted calculates the weighted sum of two arrays as follows:

dst(I) = saturate(src1(I)*alpha + src2(I)*beta + gamma)

where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each channel is processed independently. The function can be replaced with a matrix expression:

// C++ code:

dst = src1*alpha + src2*beta + gamma;

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result with an incorrect sign in case of overflow.

Parameters:
src1 - first input array.
alpha - weight of the first array elements.
src2 - second input array of the same size and channel number as src1.
beta - weight of the second array elements.
gamma - scalar added to each sum.
dst - output array that has the same size and number of channels as the input arrays.
dtype - optional depth of the output array; when both input arrays have the same depth, dtype can be set to -1, which will be equivalent to src1.depth().
See Also:
org.opencv.core.Core.addWeighted, add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), scaleAdd(org.opencv.core.Mat, double, org.opencv.core.Mat, org.opencv.core.Mat), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), Mat.convertTo(org.opencv.core.Mat, int, double, double)
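
A minimal Java sketch of a 50/50 blend, assuming img1 and img2 are existing Mat objects of the same size and type and the native library is already loaded:

// Java code:

Mat blended = new Mat();
// blended(I) = saturate(0.5*img1(I) + 0.5*img2(I) + 0)
Core.addWeighted(img1, 0.5, img2, 0.5, 0.0, blended);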

batchDistance

public static void batchDistance(Mat src1,
                                 Mat src2,
                                 Mat dist,
                                 int dtype,
                                 Mat nidx)

batchDistance

public static void batchDistance(Mat src1,
                                 Mat src2,
                                 Mat dist,
                                 int dtype,
                                 Mat nidx,
                                 int normType,
                                 int K)

batchDistance

public static void batchDistance(Mat src1,
                                 Mat src2,
                                 Mat dist,
                                 int dtype,
                                 Mat nidx,
                                 int normType,
                                 int K,
                                 Mat mask,
                                 int update,
                                 boolean crosscheck)

bitwise_and

public static void bitwise_and(Mat src1,
                               Mat src2,
                               Mat dst)

Calculates the per-element bit-wise conjunction of two arrays or an array and a scalar.

The function calculates the per-element bit-wise logical conjunction for:

dst(I) = src1(I) ∧ src2(I) if mask(I) != 0

dst(I) = src1(I) ∧ src2 if mask(I) != 0

dst(I) = src1 ∧ src2(I) if mask(I) != 0

In case of floating-point arrays, their machine-specific bit representations (usually IEEE754-compliant) are used for the operation. In case of multi-channel arrays, each channel is processed independently. In the second and third cases above, the scalar is first converted to the array type.

Parameters:
src1 - first input array or a scalar.
src2 - second input array or a scalar.
dst - output array that has the same size and type as the input arrays.
See Also:
org.opencv.core.Core.bitwise_and

bitwise_and

public static void bitwise_and(Mat src1,
                               Mat src2,
                               Mat dst,
                               Mat mask)

Calculates the per-element bit-wise conjunction of two arrays or an array and a scalar.

The function calculates the per-element bit-wise logical conjunction for:

dst(I) = src1(I) ∧ src2(I) if mask(I) != 0

dst(I) = src1(I) ∧ src2 if mask(I) != 0

dst(I) = src1 ∧ src2(I) if mask(I) != 0

In case of floating-point arrays, their machine-specific bit representations (usually IEEE754-compliant) are used for the operation. In case of multi-channel arrays, each channel is processed independently. In the second and third cases above, the scalar is first converted to the array type.

Parameters:
src1 - first input array or a scalar.
src2 - second input array or a scalar.
dst - output array that has the same size and type as the input arrays.
mask - optional operation mask; an 8-bit single-channel array that specifies elements of the output array to be changed.
See Also:
org.opencv.core.Core.bitwise_and
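
A minimal Java sketch, assuming the native library is already loaded:

// Java code:

Mat a = new Mat(1, 4, CvType.CV_8UC1, new Scalar(0xAA));
Mat b = new Mat(1, 4, CvType.CV_8UC1, new Scalar(0x0F));
Mat anded = new Mat();
Core.bitwise_and(a, b, anded); // every element becomes 0xAA & 0x0F = 0x0A
System.out.println(anded.dump());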

bitwise_not

public static void bitwise_not(Mat src,
                               Mat dst)

Inverts every bit of an array.

The function calculates per-element bit-wise inversion of the input array:

dst(I) = !src(I)

In case of a floating-point input array, its machine-specific bit representation (usually IEEE754-compliant) is used for the operation. In case of multi-channel arrays, each channel is processed independently.

Parameters:
src - input array.
dst - output array that has the same size and type as the input array.
See Also:
org.opencv.core.Core.bitwise_not

bitwise_not

public static void bitwise_not(Mat src,
                               Mat dst,
                               Mat mask)

Inverts every bit of an array.

The function calculates per-element bit-wise inversion of the input array:

dst(I) = !src(I)

In case of a floating-point input array, its machine-specific bit representation (usually IEEE754-compliant) is used for the operation. In case of multi-channel arrays, each channel is processed independently.

Parameters:
src - input array.
dst - output array that has the same size and type as the input array.
mask - optional operation mask; an 8-bit single-channel array that specifies elements of the output array to be changed.
See Also:
org.opencv.core.Core.bitwise_not
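
A minimal Java sketch, assuming the native library is already loaded; for an 8-bit array, inverting every bit maps a value v to 255 - v:

// Java code:

Mat src = new Mat(2, 2, CvType.CV_8UC1, new Scalar(200));
Mat inverted = new Mat();
Core.bitwise_not(src, inverted); // every element becomes ~200 & 0xFF = 55
System.out.println(inverted.dump());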

bitwise_or

public static void bitwise_or(Mat src1,
                              Mat src2,
                              Mat dst)

Calculates the per-element bit-wise disjunction of two arrays or an array and a scalar.

The function calculates the per-element bit-wise logical disjunction for:

dst(I) = src1(I) ∨ src2(I) if mask(I) != 0

dst(I) = src1(I) ∨ src2 if mask(I) != 0

dst(I) = src1 ∨ src2(I) if mask(I) != 0

In case of floating-point arrays, their machine-specific bit representations (usually IEEE754-compliant) are used for the operation. In case of multi-channel arrays, each channel is processed independently. In the second and third cases above, the scalar is first converted to the array type.

Parameters:
src1 - first input array or a scalar.
src2 - second input array or a scalar.
dst - output array that has the same size and type as the input arrays.
See Also:
org.opencv.core.Core.bitwise_or

bitwise_or

public static void bitwise_or(Mat src1,
                              Mat src2,
                              Mat dst,
                              Mat mask)

Calculates the per-element bit-wise disjunction of two arrays or an array and a scalar.

The function calculates the per-element bit-wise logical disjunction for:

dst(I) = src1(I) ∨ src2(I) if mask(I) != 0

dst(I) = src1(I) ∨ src2 if mask(I) != 0

dst(I) = src1 ∨ src2(I) if mask(I) != 0

In case of floating-point arrays, their machine-specific bit representations (usually IEEE754-compliant) are used for the operation. In case of multi-channel arrays, each channel is processed independently. In the second and third cases above, the scalar is first converted to the array type.

Parameters:
src1 - first input array or a scalar.
src2 - second input array or a scalar.
dst - output array that has the same size and type as the input arrays.
mask - optional operation mask, 8-bit single channel array, that specifies elements of the output array to be changed.
See Also:
org.opencv.core.Core.bitwise_or
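A minimal Java sketch (same import and native-library assumptions as the earlier example); passing a mask would simply restrict which output elements are written:

// Java code (illustrative sketch):
Mat a = new Mat(2, 2, CvType.CV_8UC1, new Scalar(0x0F));
Mat b = new Mat(2, 2, CvType.CV_8UC1, new Scalar(0xF0));
Mat dst = new Mat();
Core.bitwise_or(a, b, dst);   // every element becomes 0x0F | 0xF0 = 0xFF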

bitwise_xor

public static void bitwise_xor(Mat src1,
                               Mat src2,
                               Mat dst)

Calculates the per-element bit-wise "exclusive or" operation on two arrays or an array and a scalar.

The function calculates the per-element bit-wise logical "exclusive-or" operation for:

dst(I) = src1(I)(+) src2(I) if mask(I) != 0

dst(I) = src1(I)(+) src2 if mask(I) != 0

dst(I) = src1(+) src2(I) if mask(I) != 0

In case of floating-point arrays, their machine-specific bit representations (usually IEEE754-compliant) are used for the operation. In case of multi-channel arrays, each channel is processed independently. In the 2nd and 3rd cases above, the scalar is first converted to the array type.

Parameters:
src1 - first input array or a scalar.
src2 - second input array or a scalar.
dst - output array that has the same size and type as the input arrays.
See Also:
org.opencv.core.Core.bitwise_xor

bitwise_xor

public static void bitwise_xor(Mat src1,
                               Mat src2,
                               Mat dst,
                               Mat mask)

Calculates the per-element bit-wise "exclusive or" operation on two arrays or an array and a scalar.

The function calculates the per-element bit-wise logical "exclusive-or" operation for:

dst(I) = src1(I)(+) src2(I) if mask(I) != 0

dst(I) = src1(I)(+) src2 if mask(I) != 0

dst(I) = src1(+) src2(I) if mask(I) != 0

In case of floating-point arrays, their machine-specific bit representations (usually IEEE754-compliant) are used for the operation. In case of multi-channel arrays, each channel is processed independently. In the 2nd and 3rd cases above, the scalar is first converted to the array type.

Parameters:
src1 - first input array or a scalar.
src2 - second input array or a scalar.
dst - output array that has the same size and type as the input arrays.
mask - optional operation mask, 8-bit single channel array, that specifies elements of the output array to be changed.
See Also:
org.opencv.core.Core.bitwise_xor
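A minimal Java sketch under the same assumptions; XOR is often used to highlight the bits in which two arrays differ:

// Java code (illustrative sketch):
Mat a = new Mat(2, 2, CvType.CV_8UC1, new Scalar(0xAA));
Mat b = new Mat(2, 2, CvType.CV_8UC1, new Scalar(0xCC));
Mat dst = new Mat();
Core.bitwise_xor(a, b, dst);  // every element becomes 0xAA ^ 0xCC = 0x66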

calcCovarMatrix

public static void calcCovarMatrix(Mat samples,
                                   Mat covar,
                                   Mat mean,
                                   int flags)

Calculates the covariance matrix of a set of vectors.

The functions calcCovarMatrix calculate the covariance matrix and, optionally, the mean vector of the set of input vectors.

Parameters:
samples - samples stored either as separate matrices or as rows/columns of a single matrix.
covar - output covariance matrix of the type ctype and square size.
mean - input or output (depending on the flags) array as the average value of the input vectors.
flags - operation flags as a combination of the following values:
  • CV_COVAR_SCRAMBLED The output covariance matrix is calculated as:

scale * [vects[0]-mean, vects[1]-mean,...]^T * [vects[0]-mean, vects[1]-mean,...],

The covariance matrix will be nsamples x nsamples. Such an unusual covariance matrix is used for fast PCA of a set of very large vectors (see, for example, the EigenFaces technique for face recognition). Eigenvalues of this "scrambled" matrix match the eigenvalues of the true covariance matrix. The "true" eigenvectors can be easily calculated from the eigenvectors of the "scrambled" covariance matrix.

  • CV_COVAR_NORMAL The output covariance matrix is calculated as:

scale * [vects[0]-mean, vects[1]-mean,...] * [vects[0]-mean, vects[1]-mean,...]^T,

covar will be a square matrix of the same size as the total number of elements in each input vector. One and only one of CV_COVAR_SCRAMBLED and CV_COVAR_NORMAL must be specified.

  • CV_COVAR_USE_AVG If the flag is specified, the function does not calculate mean from the input vectors but, instead, uses the passed mean vector. This is useful if mean has been pre-calculated or known in advance, or if the covariance matrix is calculated by parts. In this case, mean is not a mean vector of the input sub-set of vectors but rather the mean vector of the whole set.
  • CV_COVAR_SCALE If the flag is specified, the covariance matrix is scaled. In the "normal" mode, scale is 1./nsamples. In the "scrambled" mode, scale is the reciprocal of the total number of elements in each input vector. By default (if the flag is not specified), the covariance matrix is not scaled (scale=1).
  • CV_COVAR_ROWS [Only useful in the second variant of the function] If the flag is specified, all the input vectors are stored as rows of the samples matrix. mean should be a single-row vector in this case.
  • CV_COVAR_COLS [Only useful in the second variant of the function] If the flag is specified, all the input vectors are stored as columns of the samples matrix. mean should be a single-column vector in this case.
See Also:
org.opencv.core.Core.calcCovarMatrix, Mahalanobis(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), mulTransposed(org.opencv.core.Mat, org.opencv.core.Mat, boolean, org.opencv.core.Mat, double, int)

calcCovarMatrix

public static void calcCovarMatrix(Mat samples,
                                   Mat covar,
                                   Mat mean,
                                   int flags,
                                   int ctype)

Calculates the covariance matrix of a set of vectors.

The functions calcCovarMatrix calculate the covariance matrix and, optionally, the mean vector of the set of input vectors.

Parameters:
samples - samples stored either as separate matrices or as rows/columns of a single matrix.
covar - output covariance matrix of the type ctype and square size.
mean - input or output (depending on the flags) array as the average value of the input vectors.
flags - operation flags as a combination of the following values:
  • CV_COVAR_SCRAMBLED The output covariance matrix is calculated as:

scale * [vects[0]-mean, vects[1]-mean,...]^T * [vects[0]-mean, vects[1]-mean,...],

The covariance matrix will be nsamples x nsamples. Such an unusual covariance matrix is used for fast PCA of a set of very large vectors (see, for example, the EigenFaces technique for face recognition). Eigenvalues of this "scrambled" matrix match the eigenvalues of the true covariance matrix. The "true" eigenvectors can be easily calculated from the eigenvectors of the "scrambled" covariance matrix.

  • CV_COVAR_NORMAL The output covariance matrix is calculated as:

scale * [vects[0]-mean, vects[1]-mean,...] * [vects[0]-mean, vects[1]-mean,...]^T,

covar will be a square matrix of the same size as the total number of elements in each input vector. One and only one of CV_COVAR_SCRAMBLED and CV_COVAR_NORMAL must be specified.

  • CV_COVAR_USE_AVG If the flag is specified, the function does not calculate mean from the input vectors but, instead, uses the passed mean vector. This is useful if mean has been pre-calculated or known in advance, or if the covariance matrix is calculated by parts. In this case, mean is not a mean vector of the input sub-set of vectors but rather the mean vector of the whole set.
  • CV_COVAR_SCALE If the flag is specified, the covariance matrix is scaled. In the "normal" mode, scale is 1./nsamples. In the "scrambled" mode, scale is the reciprocal of the total number of elements in each input vector. By default (if the flag is not specified), the covariance matrix is not scaled (scale=1).
  • CV_COVAR_ROWS [Only useful in the second variant of the function] If the flag is specified, all the input vectors are stored as rows of the samples matrix. mean should be a single-row vector in this case.
  • CV_COVAR_COLS [Only useful in the second variant of the function] If the flag is specified, all the input vectors are stored as columns of the samples matrix. mean should be a single-column vector in this case.
ctype - type of the output covariance matrix; it equals 'CV_64F' by default.
See Also:
org.opencv.core.Core.calcCovarMatrix, Mahalanobis(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), mulTransposed(org.opencv.core.Mat, org.opencv.core.Mat, boolean, org.opencv.core.Mat, double, int)
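A minimal Java sketch of the single-matrix form under the same assumptions, with the samples stored as rows (COVAR_ROWS) and a normally scaled covariance; the data values are arbitrary:

// Java code (illustrative sketch):
Mat samples = new Mat(3, 2, CvType.CV_32FC1);   // three 2-D samples, one per row
samples.put(0, 0, 1.0, 2.0,
                  3.0, 4.0,
                  5.0, 6.0);
Mat covar = new Mat();
Mat mean  = new Mat();
Core.calcCovarMatrix(samples, covar, mean,
        Core.COVAR_NORMAL | Core.COVAR_ROWS | Core.COVAR_SCALE, CvType.CV_64F);
// covar is a 2x2 CV_64F matrix; mean is the 1x2 row of per-column averages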

cartToPolar

public static void cartToPolar(Mat x,
                               Mat y,
                               Mat magnitude,
                               Mat angle)

Calculates the magnitude and angle of 2D vectors.

The function cartToPolar calculates either the magnitude, angle, or both for every 2D vector (x(I),y(I)):

magnitude(I)= sqrt(x(I)^2+y(I)^2), angle(I)= atan2(y(I), x(I))[ *180 / pi ]

The angles are calculated with an accuracy of about 0.3 degrees. For the point (0,0), the angle is set to 0.

Parameters:
x - array of x-coordinates; this must be a single-precision or double-precision floating-point array.
y - array of y-coordinates; it must have the same size and type as x.
magnitude - output array of magnitudes of the same size and type as x.
angle - output array of angles that has the same size and type as x; the angles are measured in radians (from 0 to 2*Pi) or in degrees (0 to 360 degrees).
See Also:
org.opencv.core.Core.cartToPolar, Imgproc.Scharr(org.opencv.core.Mat, org.opencv.core.Mat, int, int, int, double, double, int), Imgproc.Sobel(org.opencv.core.Mat, org.opencv.core.Mat, int, int, int, int, double, double, int)

cartToPolar

public static void cartToPolar(Mat x,
                               Mat y,
                               Mat magnitude,
                               Mat angle,
                               boolean angleInDegrees)

Calculates the magnitude and angle of 2D vectors.

The function cartToPolar calculates either the magnitude, angle, or both for every 2D vector (x(I),y(I)):

magnitude(I)= sqrt(x(I)^2+y(I)^2), angle(I)= atan2(y(I), x(I))[ *180 / pi ]

The angles are calculated with an accuracy of about 0.3 degrees. For the point (0,0), the angle is set to 0.

Parameters:
x - array of x-coordinates; this must be a single-precision or double-precision floating-point array.
y - array of y-coordinates; it must have the same size and type as x.
magnitude - output array of magnitudes of the same size and type as x.
angle - output array of angles that has the same size and type as x; the angles are measured in radians (from 0 to 2*Pi) or in degrees (0 to 360 degrees).
angleInDegrees - a flag indicating whether the angles are measured in radians (the default) or in degrees.
See Also:
org.opencv.core.Core.cartToPolar, Imgproc.Scharr(org.opencv.core.Mat, org.opencv.core.Mat, int, int, int, double, double, int), Imgproc.Sobel(org.opencv.core.Mat, org.opencv.core.Mat, int, int, int, int, double, double, int)
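A minimal Java sketch under the same assumptions, converting two vectors to polar form with angles reported in degrees:

// Java code (illustrative sketch):
Mat x = new Mat(1, 2, CvType.CV_32FC1);
Mat y = new Mat(1, 2, CvType.CV_32FC1);
x.put(0, 0, 3.0, 0.0);
y.put(0, 0, 4.0, 1.0);
Mat magnitude = new Mat();
Mat angle = new Mat();
Core.cartToPolar(x, y, magnitude, angle, true);  // angleInDegrees = true
// magnitude ~ [5, 1]; angle ~ [53.13, 90] degrees (within the ~0.3 degree accuracy noted above)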

checkRange

public static boolean checkRange(Mat a)

Checks every element of an input array for invalid values.

The functions checkRange check that every array element is neither NaN nor infinite. When minVal > -DBL_MAX or maxVal < DBL_MAX, the functions also check that each value lies between minVal (inclusive) and maxVal (exclusive). In case of multi-channel arrays, each channel is processed independently. If some values are out of range, the position of the first outlier is stored in pos (when pos != NULL). The functions then either return false (when quiet=true) or throw an exception.

Parameters:
a - input array.
See Also:
org.opencv.core.Core.checkRange

checkRange

public static boolean checkRange(Mat a,
                                 boolean quiet,
                                 double minVal,
                                 double maxVal)

Checks every element of an input array for invalid values.

The functions checkRange check that every array element is neither NaN nor infinite. When minVal > -DBL_MAX or maxVal < DBL_MAX, the functions also check that each value lies between minVal (inclusive) and maxVal (exclusive). In case of multi-channel arrays, each channel is processed independently. If some values are out of range, the position of the first outlier is stored in pos (when pos != NULL). The functions then either return false (when quiet=true) or throw an exception.

Parameters:
a - input array.
quiet - a flag indicating whether the functions quietly return false when array elements are out of range, or throw an exception instead.
minVal - inclusive lower boundary of the valid value range.
maxVal - exclusive upper boundary of the valid value range.
See Also:
org.opencv.core.Core.checkRange
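A minimal Java sketch under the same assumptions, first checking only for NaN/Inf and then against an explicit [minVal, maxVal) range:

// Java code (illustrative sketch):
Mat a = new Mat(1, 3, CvType.CV_64FC1);
a.put(0, 0, 0.5, 2.0, 7.5);
boolean finite  = Core.checkRange(a);                  // true: no NaN or Inf values
boolean inRange = Core.checkRange(a, true, 0.0, 5.0);  // false: 7.5 is outside [0, 5)
// with quiet=false the second call would throw an exception instead of returning false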

circle

public static void circle(Mat img,
                          Point center,
                          int radius,
                          Scalar color)

Draws a circle.

The function circle draws a simple or filled circle with a given center and radius.

Parameters:
img - Image where the circle is drawn.
center - Center of the circle.
radius - Radius of the circle.
color - Circle color.
See Also:
org.opencv.core.Core.circle

circle

public static void circle(Mat img,
                          Point center,
                          int radius,
                          Scalar color,
                          int thickness)

Draws a circle.

The function circle draws a simple or filled circle with a given center and radius.

Parameters:
img - Image where the circle is drawn.
center - Center of the circle.
radius - Radius of the circle.
color - Circle color.
thickness - Thickness of the circle outline, if positive. Negative thickness means that a filled circle is to be drawn.
See Also:
org.opencv.core.Core.circle

circle

public static void circle(Mat img,
                          Point center,
                          int radius,
                          Scalar color,
                          int thickness,
                          int lineType,
                          int shift)

Draws a circle.

The function circle draws a simple or filled circle with a given center and radius.

Parameters:
img - Image where the circle is drawn.
center - Center of the circle.
radius - Radius of the circle.
color - Circle color.
thickness - Thickness of the circle outline, if positive. Negative thickness means that a filled circle is to be drawn.
lineType - Type of the circle boundary. See the "line" description.
shift - Number of fractional bits in the coordinates of the center and in the radius value.
See Also:
org.opencv.core.Core.circle
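A minimal Java sketch under the same assumptions, drawing an anti-aliased outline and a filled disc on a blank BGR image:

// Java code (illustrative sketch):
Mat img = Mat.zeros(200, 200, CvType.CV_8UC3);
Core.circle(img, new Point(100, 100), 40, new Scalar(0, 0, 255), 2, Core.LINE_AA, 0);  // red outline, 2 px, anti-aliased
Core.circle(img, new Point(100, 100), 10, new Scalar(255, 0, 0), Core.FILLED);         // filled blue disc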

clipLine

public static boolean clipLine(Rect imgRect,
                               Point pt1,
                               Point pt2)

Clips the line against the image rectangle.

The functions clipLine calculate a part of the line segment that is entirely within the specified rectangle. They return false if the line segment is completely outside the rectangle. Otherwise, they return true.

Parameters:
imgRect - Image rectangle.
pt1 - First line point.
pt2 - Second line point.
See Also:
org.opencv.core.Core.clipLine
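A minimal Java sketch under the same assumptions; the endpoint objects are updated with the clipped coordinates:

// Java code (illustrative sketch):
Rect imgRect = new Rect(0, 0, 100, 100);
Point pt1 = new Point(-20, 50);
Point pt2 = new Point(150, 50);
boolean visible = Core.clipLine(imgRect, pt1, pt2);
// visible == true and pt1/pt2 now hold the part of the segment that lies inside the 100x100 rectangle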

compare

public static void compare(Mat src1,
                           Mat src2,
                           Mat dst,
                           int cmpop)

Performs the per-element comparison of two arrays or an array and a scalar value.

The function compares:

dst(I) = src1(I) cmpop src2(I)

dst(I) = src1(I) cmpop src2

dst(I) = src1 cmpop src2(I)

When the comparison result is true, the corresponding element of the output array is set to 255. The comparison operations can be replaced with the equivalent matrix expressions:

// C++ code:

Mat dst1 = src1 >= src2;

Mat dst2 = src1 < 8;...

Parameters:
src1 - first input array or a scalar (in the case of cvCmp, cv.Cmp, cvCmpS, cv.CmpS it is always an array); when it is an array, it must have a single channel.
src2 - second input array or a scalar (in the case of cvCmp and cv.Cmp it is always an array; in the case of cvCmpS, cv.CmpS it is always a scalar); when it is an array, it must have a single channel.
dst - output array that has the same size and type as the input arrays.
cmpop - a flag that specifies the per-element comparison operation:
  • CMP_EQ src1 is equal to src2.
  • CMP_GT src1 is greater than src2.
  • CMP_GE src1 is greater than or equal to src2.
  • CMP_LT src1 is less than src2.
  • CMP_LE src1 is less than or equal to src2.
  • CMP_NE src1 is unequal to src2.
See Also:
org.opencv.core.Core.compare, Imgproc.threshold(org.opencv.core.Mat, org.opencv.core.Mat, double, double, int), max(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), checkRange(org.opencv.core.Mat, boolean, double, double), min(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat)

compare

public static void compare(Mat src1,
                           Scalar src2,
                           Mat dst,
                           int cmpop)

Performs the per-element comparison of two arrays or an array and a scalar value.

The function compares:

  • Elements of two arrays when src1 and src2 have the same size:

dst(I) = src1(I) cmpop src2(I)

  • Elements of src1 with a scalar src2 when src2 is constructed from Scalar or has a single element:

dst(I) = src1(I) cmpop src2

  • src1 with elements of src2 when src1 is constructed from Scalar or has a single element:

dst(I) = src1 cmpop src2(I)

When the comparison result is true, the corresponding element of the output array is set to 255. The comparison operations can be replaced with the equivalent matrix expressions:

// C++ code:

Mat dst1 = src1 >= src2;

Mat dst2 = src1 < 8;...

Parameters:
src1 - first input array or a scalar (in the case of cvCmp, cv.Cmp, cvCmpS, cv.CmpS it is always an array); when it is an array, it must have a single channel.
src2 - second input array or a scalar (in the case of cvCmp and cv.Cmp it is always an array; in the case of cvCmpS, cv.CmpS it is always a scalar); when it is an array, it must have a single channel.
dst - output array that has the same size and type as the input arrays.
cmpop - a flag that specifies the per-element comparison operation:
  • CMP_EQ src1 is equal to src2.
  • CMP_GT src1 is greater than src2.
  • CMP_GE src1 is greater than or equal to src2.
  • CMP_LT src1 is less than src2.
  • CMP_LE src1 is less than or equal to src2.
  • CMP_NE src1 is unequal to src2.
See Also:
org.opencv.core.Core.compare, Imgproc.threshold(org.opencv.core.Mat, org.opencv.core.Mat, double, double, int), max(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), checkRange(org.opencv.core.Mat, boolean, double, double), min(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat)
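A minimal Java sketch under the same assumptions, building a mask of the elements greater than a scalar threshold:

// Java code (illustrative sketch):
Mat src = new Mat(1, 4, CvType.CV_8UC1);
src.put(0, 0, 1, 5, 8, 12);
Mat mask = new Mat();
Core.compare(src, new Scalar(7), mask, Core.CMP_GT);
// mask = [0, 0, 255, 255]: elements of src greater than 7 are set to 255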

completeSymm

public static void completeSymm(Mat mtx)

Copies the lower or the upper half of a square matrix to another half.

The function completeSymm copies one half of a square matrix to the other half. The matrix diagonal remains unchanged:

  • mtx_(ij)=mtx_(ji) for i > j if lowerToUpper=false
  • mtx_(ij)=mtx_(ji) for i < j if lowerToUpper=true

Parameters:
mtx - input-output floating-point square matrix.
See Also:
org.opencv.core.Core.completeSymm, transpose(org.opencv.core.Mat, org.opencv.core.Mat), flip(org.opencv.core.Mat, org.opencv.core.Mat, int)

completeSymm

public static void completeSymm(Mat mtx,
                                boolean lowerToUpper)

Copies the lower or the upper half of a square matrix to another half.

The function completeSymm copies one half of a square matrix to the other half. The matrix diagonal remains unchanged:

  • mtx_(ij)=mtx_(ji) for i > j if lowerToUpper=false
  • mtx_(ij)=mtx_(ji) for i < j if lowerToUpper=true

Parameters:
mtx - input-output floating-point square matrix.
lowerToUpper - operation flag; if true, the lower half is copied to the upper half. Otherwise, the upper half is copied to the lower half.
See Also:
org.opencv.core.Core.completeSymm, transpose(org.opencv.core.Mat, org.opencv.core.Mat), flip(org.opencv.core.Mat, org.opencv.core.Mat, int)
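A minimal Java sketch under the same assumptions, mirroring the upper triangle into the lower one:

// Java code (illustrative sketch):
Mat m = new Mat(3, 3, CvType.CV_32FC1);
m.put(0, 0, 1, 2, 3,
            0, 4, 5,
            0, 0, 6);
Core.completeSymm(m, false);   // lowerToUpper=false: copy the upper half to the lower half
// m is now symmetric: m(1,0)=2, m(2,0)=3, m(2,1)=5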

convertScaleAbs

public static void convertScaleAbs(Mat src,
                                   Mat dst)

Scales, calculates absolute values, and converts the result to 8-bit.

On each element of the input array, the function convertScaleAbs performs three operations sequentially: scaling, taking an absolute value, conversion to an unsigned 8-bit type:

dst(I) = saturate_cast<uchar>(|src(I)*alpha + beta|)

In case of multi-channel arrays, the function processes each channel independently. When the output is not 8-bit, the operation can be emulated by calling the Mat.convertTo method (or by using matrix expressions) and then by calculating an absolute value of the result. For example:

// C++ code:

Mat_<float> A(30,30);
randu(A, Scalar(-100), Scalar(100));
Mat_<float> B = A*5 + 3;
B = abs(B);
// Mat_<float> B = abs(A*5+3) will also do the job,
// but it will allocate a temporary matrix

Parameters:
src - input array.
dst - output array.
See Also:
org.opencv.core.Core.convertScaleAbs, Mat.convertTo(org.opencv.core.Mat, int, double, double)

convertScaleAbs

public static void convertScaleAbs(Mat src,
                                   Mat dst,
                                   double alpha,
                                   double beta)

Scales, calculates absolute values, and converts the result to 8-bit.

On each element of the input array, the function convertScaleAbs performs three operations sequentially: scaling, taking an absolute value, conversion to an unsigned 8-bit type:

dst(I) = saturate_cast<uchar>(|src(I)*alpha + beta|)

In case of multi-channel arrays, the function processes each channel independently. When the output is not 8-bit, the operation can be emulated by calling the Mat.convertTo method (or by using matrix expressions) and then by calculating an absolute value of the result. For example:

// C++ code:

Mat_<float> A(30,30);
randu(A, Scalar(-100), Scalar(100));
Mat_<float> B = A*5 + 3;
B = abs(B);
// Mat_<float> B = abs(A*5+3) will also do the job,
// but it will allocate a temporary matrix

Parameters:
src - input array.
dst - output array.
alpha - optional scale factor.
beta - optional delta added to the scaled values.
See Also:
org.opencv.core.Core.convertScaleAbs, Mat.convertTo(org.opencv.core.Mat, int, double, double)
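A minimal Java sketch under the same assumptions, showing the scaling, absolute value and 8-bit saturation in one call:

// Java code (illustrative sketch):
Mat src = new Mat(1, 3, CvType.CV_32FC1);
src.put(0, 0, -200.0, 0.0, 150.0);
Mat dst = new Mat();
Core.convertScaleAbs(src, dst, 2.0, 10.0);   // dst(I) = saturate_cast<uchar>(|src(I)*2 + 10|)
// |src*2 + 10| = [390, 10, 310], saturated to 8 bits: dst = [255, 10, 255]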

countNonZero

public static int countNonZero(Mat src)

Counts non-zero array elements.

The function returns the number of non-zero elements in src :

sum_(I: src(I) != 0) 1

Parameters:
src - single-channel array.
See Also:
org.opencv.core.Core.countNonZero, minMaxLoc(org.opencv.core.Mat, org.opencv.core.Mat), calcCovarMatrix(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int, int), meanStdDev(org.opencv.core.Mat, org.opencv.core.MatOfDouble, org.opencv.core.MatOfDouble, org.opencv.core.Mat), norm(org.opencv.core.Mat, int, org.opencv.core.Mat), mean(org.opencv.core.Mat, org.opencv.core.Mat)
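A minimal Java sketch under the same assumptions:

// Java code (illustrative sketch):
Mat m = Mat.eye(3, 3, CvType.CV_8UC1);   // identity matrix: ones on the diagonal
int n = Core.countNonZero(m);            // n == 3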

cubeRoot

public static float cubeRoot(float val)

Computes the cube root of an argument.

The function cubeRoot computes the cube root of val, i.e. val^(1/3). Negative arguments are handled correctly. NaN and Inf are not handled. The accuracy approaches the maximum possible accuracy for single-precision data.

Parameters:
val - A function argument.
See Also:
org.opencv.core.Core.cubeRoot
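A minimal Java sketch under the same assumptions:

// Java code (illustrative sketch):
float r1 = Core.cubeRoot(27.0f);   // 3.0
float r2 = Core.cubeRoot(-8.0f);   // -2.0 (negative arguments are handled correctly)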

dct

public static void dct(Mat src,
                       Mat dst)

Performs a forward or inverse discrete Cosine transform of 1D or 2D array.

The function dct performs a forward or inverse discrete Cosine transform (DCT) of a 1D or 2D floating-point array:

  • Forward Cosine transform of a 1D vector of N elements:

Y = C^N * X

where

C^N_(jk)= sqrt(alpha_j/N) cos((pi(2k+1)j)/(2N))

and

alpha_0=1, alpha_j=2 for *j > 0*.

  • Inverse Cosine transform of a 1D vector of N elements:

X = (C^N)^(-1) * Y = (C^N)^T * Y

(since C^N is an orthogonal matrix, C^N * (C^N)^T = I)

  • Forward 2D Cosine transform of M x N matrix:

Y = C^N * X * (C^N)^T

  • Inverse 2D Cosine transform of M x N matrix:

X = (C^N)^T * Y * C^N

The function chooses the mode of operation by looking at the flags and size of the input array:

  • If (flags & DCT_INVERSE) == 0, the function does a forward 1D or 2D transform. Otherwise, it is an inverse 1D or 2D transform.
  • If (flags & DCT_ROWS) != 0, the function performs a 1D transform of each row.
  • If the array is a single column or a single row, the function performs a 1D transform.
  • If none of the above is true, the function performs a 2D transform.

Note:

Currently dct supports even-size arrays (2, 4, 6...). For data analysis and approximation, you can pad the array when necessary.

Also, the function performance depends very much, and not monotonically, on the array size (see "getOptimalDFTSize"). In the current implementation DCT of a vector of size N is calculated via DFT of a vector of size N/2. Thus, the optimal DCT size N1 >= N can be calculated as:

// C++ code:

size_t getOptimalDCTSize(size_t N) { return 2*getOptimalDFTSize((N+1)/2); }

N1 = getOptimalDCTSize(N);

Parameters:
src - input floating-point array.
dst - output array of the same size and type as src.
See Also:
org.opencv.core.Core.dct, dft(org.opencv.core.Mat, org.opencv.core.Mat, int, int), idct(org.opencv.core.Mat, org.opencv.core.Mat, int), getOptimalDFTSize(int)

dct

public static void dct(Mat src,
                       Mat dst,
                       int flags)

Performs a forward or inverse discrete Cosine transform of 1D or 2D array.

The function dct performs a forward or inverse discrete Cosine transform (DCT) of a 1D or 2D floating-point array:

  • Forward Cosine transform of a 1D vector of N elements:

Y = C^N * X

where

C^N_(jk)= sqrt(alpha_j/N) cos((pi(2k+1)j)/(2N))

and

alpha_0=1, alpha_j=2 for *j > 0*.

  • Inverse Cosine transform of a 1D vector of N elements:

X = (C^N)^(-1) * Y = (C^N)^T * Y

(since C^N is an orthogonal matrix, C^N * (C^N)^T = I)

  • Forward 2D Cosine transform of M x N matrix:

Y = C^N * X * (C^N)^T

  • Inverse 2D Cosine transform of M x N matrix:

X = (C^N)^T * Y * C^N

The function chooses the mode of operation by looking at the flags and size of the input array:

  • If (flags & DCT_INVERSE) == 0, the function does a forward 1D or 2D transform. Otherwise, it is an inverse 1D or 2D transform.
  • If (flags & DCT_ROWS) != 0, the function performs a 1D transform of each row.
  • If the array is a single column or a single row, the function performs a 1D transform.
  • If none of the above is true, the function performs a 2D transform.

Note:

Currently dct supports even-size arrays (2, 4, 6...). For data analysis and approximation, you can pad the array when necessary.

Also, the function performance depends very much, and not monotonically, on the array size (see "getOptimalDFTSize"). In the current implementation DCT of a vector of size N is calculated via DFT of a vector of size N/2. Thus, the optimal DCT size N1 >= N can be calculated as:

// C++ code:

size_t getOptimalDCTSize(size_t N) { return 2*getOptimalDFTSize((N+1)/2); }

N1 = getOptimalDCTSize(N);

Parameters:
src - input floating-point array.
dst - output array of the same size and type as src.
flags - transformation flags as a combination of the following values:
  • DCT_INVERSE performs an inverse 1D or 2D transform instead of the default forward transform.
  • DCT_ROWS performs a forward or inverse transform of every individual row of the input matrix. This flag enables you to transform multiple vectors simultaneously and can be used to decrease the overhead (which is sometimes several times larger than the processing itself) to perform 3D and higher-dimensional transforms and so forth.
See Also:
org.opencv.core.Core.dct, dft(org.opencv.core.Mat, org.opencv.core.Mat, int, int), idct(org.opencv.core.Mat, org.opencv.core.Mat, int), getOptimalDFTSize(int)
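A minimal Java sketch under the same assumptions, running a forward 1D DCT on an even-size vector and then inverting it:

// Java code (illustrative sketch):
Mat src = new Mat(1, 4, CvType.CV_32FC1);   // even-size 1D signal, as required by dct
src.put(0, 0, 1.0, 2.0, 3.0, 4.0);
Mat freq = new Mat();
Mat back = new Mat();
Core.dct(src, freq);                        // forward DCT
Core.dct(freq, back, Core.DCT_INVERSE);     // inverse DCT; back ~ src up to rounding error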

determinant

public static double determinant(Mat mtx)

Returns the determinant of a square floating-point matrix.

The function determinant calculates and returns the determinant of the specified matrix. For small matrices (mtx.cols=mtx.rows<=3), the direct method is used. For larger matrices, the function uses LU factorization with partial pivoting.

For symmetric positive-definite matrices, it is also possible to use "eigen" decomposition to calculate the determinant.

Parameters:
mtx - input matrix that must have CV_32FC1 or CV_64FC1 type and square size.
See Also:
org.opencv.core.Core.determinant, invert(org.opencv.core.Mat, org.opencv.core.Mat, int), solve(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), eigen(org.opencv.core.Mat, boolean, org.opencv.core.Mat, org.opencv.core.Mat), trace(org.opencv.core.Mat)
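A minimal Java sketch under the same assumptions:

// Java code (illustrative sketch):
Mat m = new Mat(2, 2, CvType.CV_64FC1);
m.put(0, 0, 4.0, 7.0,
            2.0, 6.0);
double d = Core.determinant(m);   // 4*6 - 7*2 = 10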

dft

public static void dft(Mat src,
                       Mat dst)

Performs a forward or inverse Discrete Fourier transform of a 1D or 2D floating-point array.

The function performs one of the following:

  • Forward Fourier transform of a 1D vector of N elements:

Y = F^N * X,

where F^N_(jk)=exp(-2pi i j k/N) and i=sqrt(-1)

  • Inverse Fourier transform of a 1D vector of N elements:

X'= (F^N)^(-1) * Y = (F^N)^* * Y, X = (1/N) * X'

where F^*=(Re(F^N)-Im(F^N))^T

  • Forward 2D Fourier transform of an M x N matrix:

Y = F^M * X * F^N

  • Inverse 2D Fourier transform of an M x N matrix:

X'= (F^M)^* * Y * (F^N)^*, X = 1/(M * N) * X'

In case of real (single-channel) data, the output spectrum of the forward Fourier transform or input spectrum of the inverse Fourier transform can be represented in a packed format called *CCS* (complex-conjugate-symmetrical). It was borrowed from IPL (Intel Image Processing Library). Here is how the 2D *CCS* spectrum looks:

Re Y_(0,0)      Re Y_(0,1)    Im Y_(0,1)    Re Y_(0,2)    Im Y_(0,2)   ...   Re Y_(0,N/2-1)    Im Y_(0,N/2-1)    Re Y_(0,N/2)
Re Y_(1,0)      Re Y_(1,1)    Im Y_(1,1)    Re Y_(1,2)    Im Y_(1,2)   ...   Re Y_(1,N/2-1)    Im Y_(1,N/2-1)    Re Y_(1,N/2)
Im Y_(1,0)      Re Y_(2,1)    Im Y_(2,1)    Re Y_(2,2)    Im Y_(2,2)   ...   Re Y_(2,N/2-1)    Im Y_(2,N/2-1)    Im Y_(1,N/2)
...
Re Y_(M/2-1,0)  Re Y_(M-3,1)  Im Y_(M-3,1)  ...  Re Y_(M-3,N/2-1)  Im Y_(M-3,N/2-1)  Re Y_(M/2-1,N/2)
Im Y_(M/2-1,0)  Re Y_(M-2,1)  Im Y_(M-2,1)  ...  Re Y_(M-2,N/2-1)  Im Y_(M-2,N/2-1)  Im Y_(M/2-1,N/2)
Re Y_(M/2,0)    Re Y_(M-1,1)  Im Y_(M-1,1)  ...  Re Y_(M-1,N/2-1)  Im Y_(M-1,N/2-1)  Re Y_(M/2,N/2)

In case of 1D transform of a real vector, the output looks like the first row of the matrix above.

So, the function chooses an operation mode depending on the flags and size of the input array:

  • If DFT_ROWS is set or the input array has a single row or single column, the function performs a 1D forward or inverse transform of each row; otherwise, it performs a 2D transform.
  • If the input array is real and DFT_INVERSE is not set, the function performs a forward 1D or 2D transform:
  • When DFT_COMPLEX_OUTPUT is set, the output is a complex matrix of the same size as input.
  • When DFT_COMPLEX_OUTPUT is not set, the output is a real matrix of the same size as input. In case of 2D transform, it uses the packed format as shown above. In case of a single 1D transform, it looks like the first row of the matrix above. In case of multiple 1D transforms (when using the DFT_ROWS flag), each row of the output matrix looks like the first row of the matrix above.
  • If the input array is complex and DFT_INVERSE and DFT_REAL_OUTPUT are not both set, the output is a complex array of the same size as input. The function performs a forward or inverse 1D or 2D transform of the whole input array or each row of the input array independently, depending on the flags DFT_INVERSE and DFT_ROWS.
  • When DFT_INVERSE is set and the input array is real, or it is complex but DFT_REAL_OUTPUT is set, the output is a real array of the same size as input. The function performs a 1D or 2D inverse transformation of the whole input array or each individual row, depending on the flags DFT_INVERSE and DFT_ROWS.

If DFT_SCALE is set, the scaling is done after the transformation.

Unlike "dct", the function supports arrays of arbitrary size. But only those arrays are processed efficiently, whose sizes can be factorized in a product of small prime numbers (2, 3, and 5 in the current implementation). Such an efficient DFT size can be calculated using the "getOptimalDFTSize" method. The sample below illustrates how to calculate a DFT-based convolution of two 2D real arrays:

// C++ code:

void convolveDFT(InputArray _A, InputArray _B, OutputArray _C)
{
    Mat A = _A.getMat(), B = _B.getMat();

    // reallocate the output array if needed
    _C.create(abs(A.rows - B.rows)+1, abs(A.cols - B.cols)+1, A.type());
    Mat C = _C.getMat();

    // calculate the size of DFT transform
    Size dftSize;
    dftSize.width = getOptimalDFTSize(A.cols + B.cols - 1);
    dftSize.height = getOptimalDFTSize(A.rows + B.rows - 1);

    // allocate temporary buffers and initialize them with 0's
    Mat tempA(dftSize, A.type(), Scalar::all(0));
    Mat tempB(dftSize, B.type(), Scalar::all(0));

    // copy A and B to the top-left corners of tempA and tempB, respectively
    Mat roiA(tempA, Rect(0, 0, A.cols, A.rows));
    A.copyTo(roiA);
    Mat roiB(tempB, Rect(0, 0, B.cols, B.rows));
    B.copyTo(roiB);

    // now transform the padded A & B in-place;
    // use "nonzeroRows" hint for faster processing
    dft(tempA, tempA, 0, A.rows);
    dft(tempB, tempB, 0, B.rows);

    // multiply the spectrums;
    // the function handles packed spectrum representations well
    mulSpectrums(tempA, tempB, tempA, 0);

    // transform the product back from the frequency domain.
    // Even though all the result rows will be non-zero,
    // you need only the first C.rows of them, and thus you
    // pass nonzeroRows == C.rows
    dft(tempA, tempA, DFT_INVERSE + DFT_SCALE, C.rows);

    // now copy the result back to C.
    tempA(Rect(0, 0, C.cols, C.rows)).copyTo(C);

    // all the temporary buffers will be deallocated automatically
}

To optimize this sample, consider the following approaches:

  • Since nonzeroRows != 0 is passed to the forward transform calls and since A and B are copied to the top-left corners of tempA and tempB, respectively, it is not necessary to clear the whole tempA and tempB. It is only necessary to clear the tempA.cols - A.cols (tempB.cols - B.cols) rightmost columns of the matrices.
  • This DFT-based convolution does not have to be applied to the whole big arrays, especially if B is significantly smaller than A or vice versa. Instead, you can calculate convolution by parts. To do this, you need to split the output array C into multiple tiles. For each tile, estimate which parts of A and B are required to calculate convolution in this tile. If the tiles in C are too small, the speed will decrease a lot because of repeated work. In the ultimate case, when each tile in C is a single pixel, the algorithm becomes equivalent to the naive convolution algorithm. If the tiles are too big, the temporary arrays tempA and tempB become too big and there is also a slowdown because of bad cache locality. So, there is an optimal tile size somewhere in the middle.
  • If different tiles in C can be calculated in parallel and, thus, the convolution is done by parts, the loop can be threaded.

All of the above improvements have been implemented in "matchTemplate" and "filter2D". Therefore, by using them, you can get even better performance than with the above theoretically optimal implementation. Note, however, that those two functions actually calculate cross-correlation, not convolution, so you need to "flip" the second convolution operand B vertically and horizontally using "flip".

Note:

  • An example using the discrete Fourier transform can be found at opencv_source_code/samples/cpp/dft.cpp
  • (Python) An example using the dft functionality to perform Wiener deconvolution can be found at opencv_source/samples/python2/deconvolution.py
  • (Python) An example rearranging the quadrants of a Fourier image can be found at opencv_source/samples/python2/dft.py

Parameters:
src - input array that could be real or complex.
dst - output array whose size and type depend on the flags.
See Also:
org.opencv.core.Core.dft, Imgproc.matchTemplate(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), mulSpectrums(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int, boolean), cartToPolar(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, boolean), flip(org.opencv.core.Mat, org.opencv.core.Mat, int), magnitude(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), phase(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, boolean), dct(org.opencv.core.Mat, org.opencv.core.Mat, int), Imgproc.filter2D(org.opencv.core.Mat, org.opencv.core.Mat, int, org.opencv.core.Mat, org.opencv.core.Point, double, int), getOptimalDFTSize(int)

dft

public static void dft(Mat src,
                       Mat dst,
                       int flags,
                       int nonzeroRows)

Performs a forward or inverse Discrete Fourier transform of a 1D or 2D floating-point array.

The function performs one of the following:

  • Forward Fourier transform of a 1D vector of N elements:

Y = F^N * X,

where F^N_(jk)=exp(-2pi i j k/N) and i=sqrt(-1)

  • Inverse Fourier transform of a 1D vector of N elements:

X'= (F^N)^(-1) * Y = (F^N)^* * Y, X = (1/N) * X'

where F^*=(Re(F^N)-Im(F^N))^T

  • Forward 2D Fourier transform of an M x N matrix:

Y = F^M * X * F^N

  • Inverse 2D Fourier transform of an M x N matrix:

X'= (F^M)^* * Y * (F^N)^*, X = 1/(M * N) * X'

In case of real (single-channel) data, the output spectrum of the forward Fourier transform or input spectrum of the inverse Fourier transform can be represented in a packed format called *CCS* (complex-conjugate-symmetrical). It was borrowed from IPL (Intel Image Processing Library). Here is how the 2D *CCS* spectrum looks:

Re Y_(0,0)      Re Y_(0,1)    Im Y_(0,1)    Re Y_(0,2)    Im Y_(0,2)   ...   Re Y_(0,N/2-1)    Im Y_(0,N/2-1)    Re Y_(0,N/2)
Re Y_(1,0)      Re Y_(1,1)    Im Y_(1,1)    Re Y_(1,2)    Im Y_(1,2)   ...   Re Y_(1,N/2-1)    Im Y_(1,N/2-1)    Re Y_(1,N/2)
Im Y_(1,0)      Re Y_(2,1)    Im Y_(2,1)    Re Y_(2,2)    Im Y_(2,2)   ...   Re Y_(2,N/2-1)    Im Y_(2,N/2-1)    Im Y_(1,N/2)
...
Re Y_(M/2-1,0)  Re Y_(M-3,1)  Im Y_(M-3,1)  ...  Re Y_(M-3,N/2-1)  Im Y_(M-3,N/2-1)  Re Y_(M/2-1,N/2)
Im Y_(M/2-1,0)  Re Y_(M-2,1)  Im Y_(M-2,1)  ...  Re Y_(M-2,N/2-1)  Im Y_(M-2,N/2-1)  Im Y_(M/2-1,N/2)
Re Y_(M/2,0)    Re Y_(M-1,1)  Im Y_(M-1,1)  ...  Re Y_(M-1,N/2-1)  Im Y_(M-1,N/2-1)  Re Y_(M/2,N/2)

In case of 1D transform of a real vector, the output looks like the first row of the matrix above.

So, the function chooses an operation mode depending on the flags and size of the input array:

  • If DFT_ROWS is set or the input array has a single row or single column, the function performs a 1D forward or inverse transform of each row; otherwise, it performs a 2D transform.
  • If the input array is real and DFT_INVERSE is not set, the function performs a forward 1D or 2D transform:
  • When DFT_COMPLEX_OUTPUT is set, the output is a complex matrix of the same size as input.
  • When DFT_COMPLEX_OUTPUT is not set, the output is a real matrix of the same size as input. In case of 2D transform, it uses the packed format as shown above. In case of a single 1D transform, it looks like the first row of the matrix above. In case of multiple 1D transforms (when using the DFT_ROWS flag), each row of the output matrix looks like the first row of the matrix above.
  • If the input array is complex and DFT_INVERSE and DFT_REAL_OUTPUT are not both set, the output is a complex array of the same size as input. The function performs a forward or inverse 1D or 2D transform of the whole input array or each row of the input array independently, depending on the flags DFT_INVERSE and DFT_ROWS.
  • When DFT_INVERSE is set and the input array is real, or it is complex but DFT_REAL_OUTPUT is set, the output is a real array of the same size as input. The function performs a 1D or 2D inverse transformation of the whole input array or each individual row, depending on the flags DFT_INVERSE and DFT_ROWS.

If DFT_SCALE is set, the scaling is done after the transformation.

Unlike "dct", the function supports arrays of arbitrary size. But only those arrays are processed efficiently, whose sizes can be factorized in a product of small prime numbers (2, 3, and 5 in the current implementation). Such an efficient DFT size can be calculated using the "getOptimalDFTSize" method. The sample below illustrates how to calculate a DFT-based convolution of two 2D real arrays:

// C++ code:

void convolveDFT(InputArray _A, InputArray _B, OutputArray _C)
{
    Mat A = _A.getMat(), B = _B.getMat();

    // reallocate the output array if needed
    _C.create(abs(A.rows - B.rows)+1, abs(A.cols - B.cols)+1, A.type());
    Mat C = _C.getMat();

    // calculate the size of DFT transform
    Size dftSize;
    dftSize.width = getOptimalDFTSize(A.cols + B.cols - 1);
    dftSize.height = getOptimalDFTSize(A.rows + B.rows - 1);

    // allocate temporary buffers and initialize them with 0's
    Mat tempA(dftSize, A.type(), Scalar::all(0));
    Mat tempB(dftSize, B.type(), Scalar::all(0));

    // copy A and B to the top-left corners of tempA and tempB, respectively
    Mat roiA(tempA, Rect(0, 0, A.cols, A.rows));
    A.copyTo(roiA);
    Mat roiB(tempB, Rect(0, 0, B.cols, B.rows));
    B.copyTo(roiB);

    // now transform the padded A & B in-place;
    // use "nonzeroRows" hint for faster processing
    dft(tempA, tempA, 0, A.rows);
    dft(tempB, tempB, 0, B.rows);

    // multiply the spectrums;
    // the function handles packed spectrum representations well
    mulSpectrums(tempA, tempB, tempA, 0);

    // transform the product back from the frequency domain.
    // Even though all the result rows will be non-zero,
    // you need only the first C.rows of them, and thus you
    // pass nonzeroRows == C.rows
    dft(tempA, tempA, DFT_INVERSE + DFT_SCALE, C.rows);

    // now copy the result back to C.
    tempA(Rect(0, 0, C.cols, C.rows)).copyTo(C);

    // all the temporary buffers will be deallocated automatically
}

To optimize this sample, consider the following approaches:

  • Since nonzeroRows != 0 is passed to the forward transform calls and since A and B are copied to the top-left corners of tempA and tempB, respectively, it is not necessary to clear the whole tempA and tempB. It is only necessary to clear the tempA.cols - A.cols (tempB.cols - B.cols) rightmost columns of the matrices.
  • This DFT-based convolution does not have to be applied to the whole big arrays, especially if B is significantly smaller than A or vice versa. Instead, you can calculate convolution by parts. To do this, you need to split the output array C into multiple tiles. For each tile, estimate which parts of A and B are required to calculate convolution in this tile. If the tiles in C are too small, the speed will decrease a lot because of repeated work. In the ultimate case, when each tile in C is a single pixel, the algorithm becomes equivalent to the naive convolution algorithm. If the tiles are too big, the temporary arrays tempA and tempB become too big and there is also a slowdown because of bad cache locality. So, there is an optimal tile size somewhere in the middle.
  • If different tiles in C can be calculated in parallel and, thus, the convolution is done by parts, the loop can be threaded.

All of the above improvements have been implemented in "matchTemplate" and "filter2D". Therefore, by using them, you can get even better performance than with the above theoretically optimal implementation. Note, however, that those two functions actually calculate cross-correlation, not convolution, so you need to "flip" the second convolution operand B vertically and horizontally using "flip".

Note:

  • An example using the discrete Fourier transform can be found at opencv_source_code/samples/cpp/dft.cpp
  • (Python) An example using the dft functionality to perform Wiener deconvolution can be found at opencv_source/samples/python2/deconvolution.py
  • (Python) An example rearranging the quadrants of a Fourier image can be found at opencv_source/samples/python2/dft.py

Parameters:
src - input array that could be real or complex.
dst - output array whose size and type depend on the flags.
flags - transformation flags, representing a combination of the following values:
  • DFT_INVERSE performs an inverse 1D or 2D transform instead of the default forward transform.
  • DFT_SCALE scales the result: divide it by the number of array elements. Normally, it is combined with DFT_INVERSE.
  • DFT_ROWS performs a forward or inverse transform of every individual row of the input matrix; this flag enables you to transform multiple vectors simultaneously and can be used to decrease the overhead (which is sometimes several times larger than the processing itself) to perform 3D and higher-dimensional transformations and so forth.
  • DFT_COMPLEX_OUTPUT performs a forward transformation of 1D or 2D real array; the result, though being a complex array, has complex-conjugate symmetry (*CCS*, see the function description below for details), and such an array can be packed into a real array of the same size as input, which is the fastest option and which is what the function does by default; however, you may wish to get a full complex array (for simpler spectrum analysis, and so on) - pass the flag to enable the function to produce a full-size complex output array.
  • DFT_REAL_OUTPUT performs an inverse transformation of a 1D or 2D complex array; the result is normally a complex array of the same size, however, if the input array has conjugate-complex symmetry (for example, it is a result of forward transformation with DFT_COMPLEX_OUTPUT flag), the output is a real array; while the function itself does not check whether the input is symmetrical or not, you can pass the flag and then the function will assume the symmetry and produce the real output array (note that when the input is packed into a real array and inverse transformation is executed, the function treats the input as a packed complex-conjugate symmetrical array, and the output will also be a real array).
nonzeroRows - when the parameter is not zero, the function assumes that only the first nonzeroRows rows of the input array (when DFT_INVERSE is not set) or only the first nonzeroRows rows of the output array (when DFT_INVERSE is set) contain non-zeros; thus, the function can handle the rest of the rows more efficiently and save some time; this technique is very useful for calculating array cross-correlation or convolution using DFT.
See Also:
org.opencv.core.Core.dft, Imgproc.matchTemplate(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), mulSpectrums(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int, boolean), cartToPolar(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, boolean), flip(org.opencv.core.Mat, org.opencv.core.Mat, int), magnitude(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), phase(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, boolean), dct(org.opencv.core.Mat, org.opencv.core.Mat, int), Imgproc.filter2D(org.opencv.core.Mat, org.opencv.core.Mat, int, org.opencv.core.Mat, org.opencv.core.Point, double, int), getOptimalDFTSize(int)
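A minimal Java sketch under the same assumptions, taking a full complex spectrum of a real 1D signal and transforming it back:

// Java code (illustrative sketch):
Mat signal = new Mat(1, 8, CvType.CV_32FC1);
Core.randu(signal, 0, 1);                    // arbitrary test data
Mat spectrum = new Mat();
Mat restored = new Mat();
Core.dft(signal, spectrum, Core.DFT_COMPLEX_OUTPUT, 0);   // full-size complex spectrum (no CCS packing)
Core.dft(spectrum, restored, Core.DFT_INVERSE | Core.DFT_SCALE | Core.DFT_REAL_OUTPUT, 0);
// restored is approximately equal to signal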

divide

public static void divide(double scale,
                          Mat src2,
                          Mat dst)

Performs per-element division of two arrays or a scalar by an array.

The functions divide divide one array by another:

dst(I) = saturate(src1(I)*scale/src2(I))

or a scalar by an array when there is no src1 :

dst(I) = saturate(scale/src2(I))

When src2(I) is zero, dst(I) will also be zero. Different channels of multi-channel arrays are processed independently.

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result of an incorrect sign in the case of overflow.

Parameters:
scale - scalar factor.
src2 - second input array of the same size and type as src1.
dst - output array of the same size and type as src2.
See Also:
org.opencv.core.Core.divide, multiply(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, double, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int)

divide

public static void divide(double scale,
                          Mat src2,
                          Mat dst,
                          int dtype)

Performs per-element division of two arrays or a scalar by an array.

The functions divide divide one array by another:

dst(I) = saturate(src1(I)*scale/src2(I))

or a scalar by an array when there is no src1 :

dst(I) = saturate(scale/src2(I))

When src2(I) is zero, dst(I) will also be zero. Different channels of multi-channel arrays are processed independently.

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result of an incorrect sign in the case of overflow.

Parameters:
scale - scalar factor.
src2 - second input array of the same size and type as src1.
dst - output array of the same size and type as src2.
dtype - optional depth of the output array; if -1, dst will have depth src2.depth(), but in case of an array-by-array division, you can only pass -1 when src1.depth()==src2.depth().
See Also:
org.opencv.core.Core.divide, multiply(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, double, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int)

divide

public static void divide(Mat src1,
                          Mat src2,
                          Mat dst)

Performs per-element division of two arrays or a scalar by an array.

The functions divide divide one array by another:

dst(I) = saturate(src1(I)*scale/src2(I))

or a scalar by an array when there is no src1 :

dst(I) = saturate(scale/src2(I))

When src2(I) is zero, dst(I) will also be zero. Different channels of multi-channel arrays are processed independently.

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result of an incorrect sign in the case of overflow.

Parameters:
src1 - first input array.
src2 - second input array of the same size and type as src1.
dst - output array of the same size and type as src2.
See Also:
org.opencv.core.Core.divide, multiply(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, double, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int)

divide

public static void divide(Mat src1,
                          Mat src2,
                          Mat dst,
                          double scale)

Performs per-element division of two arrays or a scalar by an array.

The functions divide divide one array by another:

dst(I) = saturate(src1(I)*scale/src2(I))

or a scalar by an array when there is no src1 :

dst(I) = saturate(scale/src2(I))

When src2(I) is zero, dst(I) will also be zero. Different channels of multi-channel arrays are processed independently.

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result of an incorrect sign in the case of overflow.

Parameters:
src1 - first input array.
src2 - second input array of the same size and type as src1.
dst - output array of the same size and type as src2.
scale - scalar factor.
See Also:
org.opencv.core.Core.divide, multiply(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, double, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int)

divide

public static void divide(Mat src1,
                          Mat src2,
                          Mat dst,
                          double scale,
                          int dtype)

Performs per-element division of two arrays or a scalar by an array.

The functions divide divide one array by another:

dst(I) = saturate(src1(I)*scale/src2(I))

or a scalar by an array when there is no src1 :

dst(I) = saturate(scale/src2(I))

When src2(I) is zero, dst(I) will also be zero. Different channels of multi-channel arrays are processed independently.

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result of an incorrect sign in the case of overflow.

Parameters:
src1 - first input array.
src2 - second input array of the same size and type as src1.
dst - output array of the same size and type as src2.
scale - scalar factor.
dtype - optional depth of the output array; if -1, dst will have depth src2.depth(), but in case of an array-by-array division, you can only pass -1 when src1.depth()==src2.depth().
See Also:
org.opencv.core.Core.divide, multiply(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, double, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int)

divide

public static void divide(Mat src1,
                          Scalar src2,
                          Mat dst)

Performs per-element division of two arrays or a scalar by an array.

The functions divide divide one array by another:

dst(I) = saturate(src1(I)*scale/src2(I))

or a scalar by an array when there is no src1 :

dst(I) = saturate(scale/src2(I))

When src2(I) is zero, dst(I) will also be zero. Different channels of multi-channel arrays are processed independently.

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result of an incorrect sign in the case of overflow.

Parameters:
src1 - first input array.
src2 - second input array of the same size and type as src1.
dst - output array of the same size and type as src2.
See Also:
org.opencv.core.Core.divide, multiply(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, double, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int)

divide

public static void divide(Mat src1,
                          Scalar src2,
                          Mat dst,
                          double scale)

Performs per-element division of two arrays or a scalar by an array.

The functions divide divide one array by another:

dst(I) = saturate(src1(I)*scale/src2(I))

or a scalar by an array when there is no src1 :

dst(I) = saturate(scale/src2(I))

When src2(I) is zero, dst(I) will also be zero. Different channels of multi-channel arrays are processed independently.

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result of an incorrect sign in the case of overflow.

Parameters:
src1 - first input array.
src2 - second input array of the same size and type as src1.
dst - output array of the same size and type as src2.
scale - scalar factor.
See Also:
org.opencv.core.Core.divide, multiply(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, double, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int)

divide

public static void divide(Mat src1,
                          Scalar src2,
                          Mat dst,
                          double scale,
                          int dtype)

Performs per-element division of two arrays or a scalar by an array.

The functions divide divide one array by another:

dst(I) = saturate(src1(I)*scale/src2(I))

or a scalar by an array when there is no src1 :

dst(I) = saturate(scale/src2(I))

When src2(I) is zero, dst(I) will also be zero. Different channels of multi-channel arrays are processed independently.

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result of an incorrect sign in the case of overflow.

Parameters:
src1 - first input array.
src2 - second input array of the same size and type as src1.
dst - output array of the same size and type as src2.
scale - scalar factor.
dtype - optional depth of the output array; if -1, dst will have depth src2.depth(), but in case of an array-by-array division, you can only pass -1 when src1.depth()==src2.depth().
See Also:
org.opencv.core.Core.divide, multiply(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, double, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int)
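A minimal Java sketch under the same assumptions, using the array-by-array variant with a scale factor; note how division by zero produces zero:

// Java code (illustrative sketch):
Mat a = new Mat(1, 3, CvType.CV_32FC1);
Mat b = new Mat(1, 3, CvType.CV_32FC1);
a.put(0, 0, 10.0, 9.0, 1.0);
b.put(0, 0,  2.0, 3.0, 0.0);
Mat dst = new Mat();
Core.divide(a, b, dst, 2.0);   // dst(I) = a(I)*2/b(I) = [10, 6, 0]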

eigen

public static boolean eigen(Mat src,
                            boolean computeEigenvectors,
                            Mat eigenvalues,
                            Mat eigenvectors)

Calculates eigenvalues and eigenvectors of a symmetric matrix.

The functions eigen calculate just eigenvalues, or eigenvalues and eigenvectors of the symmetric matrix src :

// C++ code:

src*eigenvectors.row(i).t() = eigenvalues.at<srcType>(i)*eigenvectors.row(i).t()

Note: the new and the old interfaces use a different ordering of the eigenvalues and eigenvectors parameters.

Parameters:
src - input matrix that must have CV_32FC1 or CV_64FC1 type, square size and be symmetrical (src^"T" == src).
computeEigenvectors - flag indicating whether the eigenvectors should be computed in addition to the eigenvalues.
eigenvalues - output vector of eigenvalues of the same type as src; the eigenvalues are stored in the descending order.
eigenvectors - output matrix of eigenvectors; it has the same size and type as src; the eigenvectors are stored as subsequent matrix rows, in the same order as the corresponding eigenvalues.
See Also:
org.opencv.core.Core.eigen, completeSymm(org.opencv.core.Mat, boolean)
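
A small Java sketch of the call above (illustrative only; assumes org.opencv.core.* is imported and the native library is loaded). The 2x2 symmetric matrix [[2,1],[1,2]] has eigenvalues 3 and 1:

// Java code (illustrative sketch):
Mat src = new Mat(2, 2, CvType.CV_32FC1);
src.put(0, 0, 2, 1, 1, 2);            // symmetric matrix
Mat eigenvalues = new Mat();
Mat eigenvectors = new Mat();
boolean ok = Core.eigen(src, true, eigenvalues, eigenvectors);
// eigenvalues are stored in descending order (3, then 1);
// eigenvectors are stored as matrix rows in the same order
System.out.println(ok + "\n" + eigenvalues.dump() + "\n" + eigenvectors.dump());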

ellipse

public static void ellipse(Mat img,
                           Point center,
                           Size axes,
                           double angle,
                           double startAngle,
                           double endAngle,
                           Scalar color)

Draws a simple or thick elliptic arc or fills an ellipse sector.

The functions ellipse with fewer parameters draw an ellipse outline, a filled ellipse, an elliptic arc, or a filled ellipse sector. A piecewise-linear curve is used to approximate the elliptic arc boundary. If you need more control over the ellipse rendering, you can retrieve the curve using "ellipse2Poly" and then render it with "polylines" or fill it with "fillPoly". If you use the first variant of the function and want to draw the whole ellipse, not an arc, pass startAngle=0 and endAngle=360. The figure below explains the meaning of the parameters. Figure 1. Parameters of Elliptic Arc

Parameters:
img - Image.
center - Center of the ellipse.
axes - Half of the size of the ellipse main axes.
angle - Ellipse rotation angle in degrees.
startAngle - Starting angle of the elliptic arc in degrees.
endAngle - Ending angle of the elliptic arc in degrees.
color - Ellipse color.
See Also:
org.opencv.core.Core.ellipse

ellipse

public static void ellipse(Mat img,
                           Point center,
                           Size axes,
                           double angle,
                           double startAngle,
                           double endAngle,
                           Scalar color,
                           int thickness)

Draws a simple or thick elliptic arc or fills an ellipse sector.

The functions ellipse with fewer parameters draw an ellipse outline, a filled ellipse, an elliptic arc, or a filled ellipse sector. A piecewise-linear curve is used to approximate the elliptic arc boundary. If you need more control over the ellipse rendering, you can retrieve the curve using "ellipse2Poly" and then render it with "polylines" or fill it with "fillPoly". If you use the first variant of the function and want to draw the whole ellipse, not an arc, pass startAngle=0 and endAngle=360. The figure below explains the meaning of the parameters. Figure 1. Parameters of Elliptic Arc

Parameters:
img - Image.
center - Center of the ellipse.
axes - Half of the size of the ellipse main axes.
angle - Ellipse rotation angle in degrees.
startAngle - Starting angle of the elliptic arc in degrees.
endAngle - Ending angle of the elliptic arc in degrees.
color - Ellipse color.
thickness - Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that a filled ellipse sector is to be drawn.
See Also:
org.opencv.core.Core.ellipse

ellipse

public static void ellipse(Mat img,
                           Point center,
                           Size axes,
                           double angle,
                           double startAngle,
                           double endAngle,
                           Scalar color,
                           int thickness,
                           int lineType,
                           int shift)

Draws a simple or thick elliptic arc or fills an ellipse sector.

The functions ellipse with fewer parameters draw an ellipse outline, a filled ellipse, an elliptic arc, or a filled ellipse sector. A piecewise-linear curve is used to approximate the elliptic arc boundary. If you need more control over the ellipse rendering, you can retrieve the curve using "ellipse2Poly" and then render it with "polylines" or fill it with "fillPoly". If you use the first variant of the function and want to draw the whole ellipse, not an arc, pass startAngle=0 and endAngle=360. The figure below explains the meaning of the parameters. Figure 1. Parameters of Elliptic Arc

Parameters:
img - Image.
center - Center of the ellipse.
axes - Half of the size of the ellipse main axes.
angle - Ellipse rotation angle in degrees.
startAngle - Starting angle of the elliptic arc in degrees.
endAngle - Ending angle of the elliptic arc in degrees.
color - Ellipse color.
thickness - Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that a filled ellipse sector is to be drawn.
lineType - Type of the ellipse boundary. See the "line" description.
shift - Number of fractional bits in the coordinates of the center and values of axes.
See Also:
org.opencv.core.Core.ellipse
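
As an illustration, a Java sketch that draws a full ellipse outline and a filled sector on a blank canvas (illustrative only; assumes org.opencv.core.* is imported and the native library is loaded):

// Java code (illustrative sketch):
Mat canvas = Mat.zeros(400, 400, CvType.CV_8UC3);
// full ellipse outline: startAngle=0, endAngle=360, thickness=2
Core.ellipse(canvas, new Point(200, 200), new Size(150, 80), 30, 0, 360, new Scalar(0, 255, 0), 2);
// filled 90-degree sector: a negative thickness requests a filled sector
Core.ellipse(canvas, new Point(200, 200), new Size(150, 80), 30, 0, 90, new Scalar(0, 0, 255), -1);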

ellipse

public static void ellipse(Mat img,
                           RotatedRect box,
                           Scalar color)

Draws a simple or thick elliptic arc or fills an ellipse sector.

The functions ellipse with fewer parameters draw an ellipse outline, a filled ellipse, an elliptic arc, or a filled ellipse sector. A piecewise-linear curve is used to approximate the elliptic arc boundary. If you need more control over the ellipse rendering, you can retrieve the curve using "ellipse2Poly" and then render it with "polylines" or fill it with "fillPoly". If you use the first variant of the function and want to draw the whole ellipse, not an arc, pass startAngle=0 and endAngle=360. The figure below explains the meaning of the parameters. Figure 1. Parameters of Elliptic Arc

Parameters:
img - Image.
box - Alternative ellipse representation via "RotatedRect" or CvBox2D. This means that the function draws an ellipse inscribed in the rotated rectangle.
color - Ellipse color.
See Also:
org.opencv.core.Core.ellipse

ellipse

public static void ellipse(Mat img,
                           RotatedRect box,
                           Scalar color,
                           int thickness)

Draws a simple or thick elliptic arc or fills an ellipse sector.

The functions ellipse with fewer parameters draw an ellipse outline, a filled ellipse, an elliptic arc, or a filled ellipse sector. A piecewise-linear curve is used to approximate the elliptic arc boundary. If you need more control over the ellipse rendering, you can retrieve the curve using "ellipse2Poly" and then render it with "polylines" or fill it with "fillPoly". If you use the first variant of the function and want to draw the whole ellipse, not an arc, pass startAngle=0 and endAngle=360. The figure below explains the meaning of the parameters. Figure 1. Parameters of Elliptic Arc

Parameters:
img - Image.
box - Alternative ellipse representation via "RotatedRect" or CvBox2D. This means that the function draws an ellipse inscribed in the rotated rectangle.
color - Ellipse color.
thickness - Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that a filled ellipse sector is to be drawn.
See Also:
org.opencv.core.Core.ellipse

ellipse

public static void ellipse(Mat img,
                           RotatedRect box,
                           Scalar color,
                           int thickness,
                           int lineType)

Draws a simple or thick elliptic arc or fills an ellipse sector.

The functions ellipse with fewer parameters draw an ellipse outline, a filled ellipse, an elliptic arc, or a filled ellipse sector. A piecewise-linear curve is used to approximate the elliptic arc boundary. If you need more control over the ellipse rendering, you can retrieve the curve using "ellipse2Poly" and then render it with "polylines" or fill it with "fillPoly". If you use the first variant of the function and want to draw the whole ellipse, not an arc, pass startAngle=0 and endAngle=360. The figure below explains the meaning of the parameters. Figure 1. Parameters of Elliptic Arc

Parameters:
img - Image.
box - Alternative ellipse representation via "RotatedRect" or CvBox2D. This means that the function draws an ellipse inscribed in the rotated rectangle.
color - Ellipse color.
thickness - Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that a filled ellipse sector is to be drawn.
lineType - Type of the ellipse boundary. See the "line" description.
See Also:
org.opencv.core.Core.ellipse

ellipse2Poly

public static void ellipse2Poly(Point center,
                                Size axes,
                                int angle,
                                int arcStart,
                                int arcEnd,
                                int delta,
                                MatOfPoint pts)

Approximates an elliptic arc with a polyline.

The function ellipse2Poly computes the vertices of a polyline that approximates the specified elliptic arc. It is used by "ellipse".

Parameters:
center - Center of the arc.
axes - Half of the size of the ellipse main axes. See the "ellipse" for details.
angle - Rotation angle of the ellipse in degrees. See the "ellipse" for details.
arcStart - Starting angle of the elliptic arc in degrees.
arcEnd - Ending angle of the elliptic arc in degrees.
delta - Angle between the subsequent polyline vertices. It defines the approximation accuracy.
pts - Output vector of polyline vertices.
See Also:
org.opencv.core.Core.ellipse2Poly

exp

public static void exp(Mat src,
                       Mat dst)

Calculates the exponent of every array element.

The function exp calculates the exponent of every element of the input array:

dst(I) = e^(src(I))

The maximum relative error is about 7e-6 for single-precision input and less than 1e-10 for double-precision input. Currently, the function converts denormalized values to zeros on output. Special values (NaN, Inf) are not handled.

Parameters:
src - input array.
dst - output array of the same size and type as src.
See Also:
org.opencv.core.Core.exp, log(org.opencv.core.Mat, org.opencv.core.Mat), cartToPolar(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, boolean), pow(org.opencv.core.Mat, double, org.opencv.core.Mat), sqrt(org.opencv.core.Mat, org.opencv.core.Mat), magnitude(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), polarToCart(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, boolean), phase(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, boolean)
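
A short Java sketch showing that exp followed by log recovers the input up to rounding (illustrative only; assumes org.opencv.core.* is imported and the native library is loaded):

// Java code (illustrative sketch):
Mat src = new Mat(1, 3, CvType.CV_32FC1);
src.put(0, 0, 0.0, 1.0, 2.0);
Mat e = new Mat();
Mat back = new Mat();
Core.exp(src, e);    // e ~ [1, 2.718, 7.389]
Core.log(e, back);   // back ~ [0, 1, 2]
System.out.println(e.dump() + "\n" + back.dump());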

extractChannel

public static void extractChannel(Mat src,
                                  Mat dst,
                                  int coi)

fastAtan2

public static float fastAtan2(float y,
                              float x)

Calculates the angle of a 2D vector in degrees.

The function fastAtan2 calculates the full-range angle of an input 2D vector. The angle is measured in degrees and varies from 0 to 360 degrees. The accuracy is about 0.3 degrees.

Parameters:
y - y-coordinate of the vector.
x - x-coordinate of the vector.
See Also:
org.opencv.core.Core.fastAtan2

fillConvexPoly

public static void fillConvexPoly(Mat img,
                                  MatOfPoint points,
                                  Scalar color)

Fills a convex polygon.

The function fillConvexPoly draws a filled convex polygon. This function is much faster than the function fillPoly. It can fill not only convex polygons but any monotonic polygon without self-intersections, that is, a polygon whose contour intersects every horizontal line (scan line) at most twice (though its top-most and/or bottom edge could be horizontal).

Parameters:
img - Image.
points - Polygon vertices.
color - Polygon color.
See Also:
org.opencv.core.Core.fillConvexPoly

fillConvexPoly

public static void fillConvexPoly(Mat img,
                                  MatOfPoint points,
                                  Scalar color,
                                  int lineType,
                                  int shift)

Fills a convex polygon.

The function fillConvexPoly draws a filled convex polygon. This function is much faster than the function fillPoly. It can fill not only convex polygons but any monotonic polygon without self-intersections, that is, a polygon whose contour intersects every horizontal line (scan line) at most twice (though its top-most and/or bottom edge could be horizontal).

Parameters:
img - Image.
points - Polygon vertices.
color - Polygon color.
lineType - Type of the polygon boundaries. See the "line" description.
shift - Number of fractional bits in the vertex coordinates.
See Also:
org.opencv.core.Core.fillConvexPoly

fillPoly

public static void fillPoly(Mat img,
                            java.util.List<MatOfPoint> pts,
                            Scalar color)

Fills the area bounded by one or more polygons.

The function fillPoly fills an area bounded by several polygonal contours. The function can fill complex areas, for example, areas with holes, contours with self-intersections (some of their parts), and so forth.

Parameters:
img - Image.
pts - Array of polygons where each polygon is represented as an array of points.
color - Polygon color.
See Also:
org.opencv.core.Core.fillPoly

fillPoly

public static void fillPoly(Mat img,
                            java.util.List<MatOfPoint> pts,
                            Scalar color,
                            int lineType,
                            int shift,
                            Point offset)

Fills the area bounded by one or more polygons.

The function fillPoly fills an area bounded by several polygonal contours. The function can fill complex areas, for example, areas with holes, contours with self-intersections (some of their parts), and so forth.

Parameters:
img - Image.
pts - Array of polygons where each polygon is represented as an array of points.
color - Polygon color.
lineType - Type of the polygon boundaries. See the "line" description.
shift - Number of fractional bits in the vertex coordinates.
offset - Optional offset of all points of the contours.
See Also:
org.opencv.core.Core.fillPoly
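
For illustration, a Java sketch that fills a single triangle (illustrative only; assumes org.opencv.core.* and java.util.* are imported and the native library is loaded):

// Java code (illustrative sketch):
Mat canvas = Mat.zeros(200, 200, CvType.CV_8UC3);
MatOfPoint triangle = new MatOfPoint(new Point(20, 180), new Point(100, 20), new Point(180, 180));
List<MatOfPoint> polygons = new ArrayList<MatOfPoint>();
polygons.add(triangle);
Core.fillPoly(canvas, polygons, new Scalar(255, 0, 0));   // fill the triangle in blue (BGR)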

findNonZero

public static void findNonZero(Mat src,
                               Mat idx)

flip

public static void flip(Mat src,
                        Mat dst,
                        int flipCode)

Flips a 2D array around vertical, horizontal, or both axes.

The function flip flips the array in one of three different ways (row and column indices are 0-based):

dst(i,j) = src(src.rows-i-1, j) if flipCode = 0
dst(i,j) = src(i, src.cols-j-1) if flipCode > 0
dst(i,j) = src(src.rows-i-1, src.cols-j-1) if flipCode < 0

The example scenarios of using the function are the following:

  • Vertical flipping of the image (flipCode == 0) to switch between top-left and bottom-left image origin. This is a typical operation in video processing on Microsoft Windows* OS.
  • Horizontal flipping of the image with the subsequent horizontal shift and absolute difference calculation to check for a vertical-axis symmetry (flipCode > 0).
  • Simultaneous horizontal and vertical flipping of the image with the subsequent shift and absolute difference calculation to check for a central symmetry (flipCode < 0).
  • Reversing the order of point arrays (flipCode > 0 or flipCode == 0).

Parameters:
src - input array.
dst - output array of the same size and type as src.
flipCode - a flag to specify how to flip the array; 0 means flipping around the x-axis, a positive value (for example, 1) means flipping around the y-axis, and a negative value (for example, -1) means flipping around both axes (see the formulas above).
See Also:
org.opencv.core.Core.flip, repeat(org.opencv.core.Mat, int, int, org.opencv.core.Mat), transpose(org.opencv.core.Mat, org.opencv.core.Mat), completeSymm(org.opencv.core.Mat, boolean)
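
A minimal Java sketch of the three flipCode values (illustrative only; assumes org.opencv.core.* is imported and the native library is loaded):

// Java code (illustrative sketch):
Mat src = new Mat(2, 2, CvType.CV_8UC1);
src.put(0, 0, 1, 2, 3, 4);              // [[1,2],[3,4]]
Mat dst = new Mat();
Core.flip(src, dst, 0);                 // around the x-axis -> [[3,4],[1,2]]
Core.flip(src, dst, 1);                 // around the y-axis -> [[2,1],[4,3]]
Core.flip(src, dst, -1);                // around both axes  -> [[4,3],[2,1]]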

gemm

public static void gemm(Mat src1,
                        Mat src2,
                        double alpha,
                        Mat src3,
                        double gamma,
                        Mat dst)

Performs generalized matrix multiplication.

The function performs generalized matrix multiplication similar to the gemm functions in BLAS level 3. For example, gemm(src1, src2, alpha, src3, gamma, dst, GEMM_1_T + GEMM_3_T) corresponds to

dst = alpha * src1^T * src2 + gamma * src3^T

The function can be replaced with a matrix expression. For example, the above call can be replaced with:

// C++ code:

dst = alpha*src1.t()*src2 + gamma*src3.t();

Parameters:
src1 - first multiplied input matrix that should have CV_32FC1, CV_64FC1, CV_32FC2, or CV_64FC2 type.
src2 - second multiplied input matrix of the same type as src1.
alpha - weight of the matrix product.
src3 - third optional delta matrix added to the matrix product; it should have the same type as src1 and src2.
gamma - weight of src3 added to the matrix product.
dst - output matrix; it has the proper size and the same type as input matrices.
See Also:
org.opencv.core.Core.gemm, mulTransposed(org.opencv.core.Mat, org.opencv.core.Mat, boolean, org.opencv.core.Mat, double, int), transform(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat)

gemm

public static void gemm(Mat src1,
                        Mat src2,
                        double alpha,
                        Mat src3,
                        double gamma,
                        Mat dst,
                        int flags)

Performs generalized matrix multiplication.

The function performs generalized matrix multiplication similar to the gemm functions in BLAS level 3. For example, gemm(src1, src2, alpha, src3, gamma, dst, GEMM_1_T + GEMM_3_T) corresponds to

dst = alpha * src1^T * src2 + gamma * src3^T

The function can be replaced with a matrix expression. For example, the above call can be replaced with:

// C++ code:

dst = alpha*src1.t()*src2 + gamma*src3.t();

Parameters:
src1 - first multiplied input matrix that should have CV_32FC1, CV_64FC1, CV_32FC2, or CV_64FC2 type.
src2 - second multiplied input matrix of the same type as src1.
alpha - weight of the matrix product.
src3 - third optional delta matrix added to the matrix product; it should have the same type as src1 and src2.
gamma - weight of src3 added to the matrix product.
dst - output matrix; it has the proper size and the same type as input matrices.
flags - operation flags:
  • GEMM_1_T transposes src1.
  • GEMM_2_T transposes src2.
  • GEMM_3_T transposes src3.
See Also:
org.opencv.core.Core.gemm, mulTransposed(org.opencv.core.Mat, org.opencv.core.Mat, boolean, org.opencv.core.Mat, double, int), transform(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat)
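
The following Java sketch evaluates dst = alpha*src1^T*src2 + gamma*src3 using GEMM_1_T (illustrative only; assumes org.opencv.core.* is imported and the native library is loaded):

// Java code (illustrative sketch):
Mat a = new Mat(2, 2, CvType.CV_32FC1);
a.put(0, 0, 1, 2, 3, 4);
Mat b = new Mat(2, 2, CvType.CV_32FC1);
b.put(0, 0, 5, 6, 7, 8);
Mat c = new Mat(2, 2, CvType.CV_32FC1, new Scalar(1));
Mat dst = new Mat();
// dst = 1.0 * a^T * b + 0.5 * c  ->  [[26.5, 30.5], [38.5, 44.5]]
Core.gemm(a, b, 1.0, c, 0.5, dst, Core.GEMM_1_T);
System.out.println(dst.dump());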

getBuildInformation

public static java.lang.String getBuildInformation()

Returns the full configuration-time CMake output.

The returned value is raw CMake output, including the version control system revision, compiler version, compiler flags, enabled modules, third-party libraries, and so on. The output format depends on the target architecture.

See Also:
org.opencv.core.Core.getBuildInformation

getCPUTickCount

public static long getCPUTickCount()

Returns the number of CPU ticks.

The function returns the current number of CPU ticks on some architectures (such as x86, x64, and PowerPC). On other platforms the function is equivalent to getTickCount. It can also be used for very accurate time measurements, as well as for RNG initialization. Note that in the case of multi-CPU systems, a thread from which getCPUTickCount is called can be suspended and resumed on another CPU with its own counter, so, theoretically (and practically), subsequent calls to the function do not necessarily return monotonically increasing values. Also, since a modern CPU varies the CPU frequency depending on the load, the number of CPU clocks spent in some code cannot be directly converted to time units. Therefore, getTickCount is generally the preferable solution for measuring execution time.

See Also:
org.opencv.core.Core.getCPUTickCount

getNumberOfCPUs

public static int getNumberOfCPUs()

Returns the number of logical CPUs available for the process.

See Also:
org.opencv.core.Core.getNumberOfCPUs

getOptimalDFTSize

public static int getOptimalDFTSize(int vecsize)

Returns the optimal DFT size for a given vector size.

DFT performance is not a monotonic function of a vector size. Therefore, when you calculate convolution of two arrays or perform the spectral analysis of an array, it usually makes sense to pad the input data with zeros to get a slightly larger array that can be transformed much faster than the original one. Arrays whose size is a power of two (2, 4, 8, 16, 32,...) are the fastest to process. Arrays whose size is a product of 2's, 3's, and 5's (for example, 300 = 5*5*3*2*2) are also processed quite efficiently.

The function getOptimalDFTSize returns the minimum number N that is greater than or equal to vecsize so that the DFT of a vector of size N can be processed efficiently. In the current implementation N = 2^"p" * 3^"q" * 5^"r" for some integer p, q, r.

The function returns a negative number if vecsize is too large (very close to INT_MAX).

While the function cannot be used directly to estimate the optimal vector size for DCT transform (since the current DCT implementation supports only even-size vectors), it can be easily computed as getOptimalDFTSize((vecsize+1)/2)*2.

Parameters:
vecsize - vector size.
See Also:
org.opencv.core.Core.getOptimalDFTSize, dft(org.opencv.core.Mat, org.opencv.core.Mat, int, int), dct(org.opencv.core.Mat, org.opencv.core.Mat, int), idct(org.opencv.core.Mat, org.opencv.core.Mat, int), mulSpectrums(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int, boolean), idft(org.opencv.core.Mat, org.opencv.core.Mat, int, int)
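
For example, a Java sketch that pads a vector length up to an efficient DFT size (illustrative only; assumes org.opencv.core.* is imported and the native library is loaded):

// Java code (illustrative sketch):
int n = 451;
int optimal = Core.getOptimalDFTSize(n);
// 480 = 2^5 * 3 * 5, the smallest number of the form 2^p * 3^q * 5^r that is >= 451
System.out.println(n + " -> " + optimal);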

getTextSize

public static Size getTextSize(java.lang.String text,
                               int fontFace,
                               double fontScale,
                               int thickness,
                               int[] baseLine)

Calculates the width and height of a text string.

The function getTextSize calculates and returns the size of a box that contains the specified text. That is, the following code renders some text, the tight box surrounding it, and the baseline:

// C++ code:

string text = "Funny text inside the box";
int fontFace = FONT_HERSHEY_SCRIPT_SIMPLEX;
double fontScale = 2;
int thickness = 3;

Mat img(600, 800, CV_8UC3, Scalar::all(0));

int baseline = 0;
Size textSize = getTextSize(text, fontFace, fontScale, thickness, &baseline);
baseline += thickness;

// center the text
Point textOrg((img.cols - textSize.width)/2, (img.rows + textSize.height)/2);

// draw the box
rectangle(img, textOrg + Point(0, baseline), textOrg + Point(textSize.width, -textSize.height), Scalar(0, 0, 255));

// ... and the baseline first
line(img, textOrg + Point(0, thickness), textOrg + Point(textSize.width, thickness), Scalar(0, 0, 255));

// then put the text itself
putText(img, text, textOrg, fontFace, fontScale, Scalar::all(255), thickness, 8);

Parameters:
text - Input text string.
fontFace - Font to use. See the "putText" for details.
fontScale - Font scale. See the "putText" for details.
thickness - Thickness of lines used to render the text. See "putText" for details.
baseLine - Output parameter - y-coordinate of the baseline relative to the bottom-most text point.
See Also:
org.opencv.core.Core.getTextSize

getTickCount

public static long getTickCount()

Returns the number of ticks.

The function returns the number of ticks after a certain event (for example, when the machine was turned on). It can be used to initialize "RNG" or to measure a function execution time by reading the tick count before and after the function call. See also the tick frequency.

See Also:
org.opencv.core.Core.getTickCount

getTickFrequency

public static double getTickFrequency()

Returns the number of ticks per second.

The function returns the number of ticks per second. That is, the following code computes the execution time in seconds:

// C++ code:

double t = (double)getTickCount();
// do something...
t = ((double)getTickCount() - t)/getTickFrequency();

See Also:
org.opencv.core.Core.getTickFrequency
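
The same timing pattern in Java (illustrative only; assumes org.opencv.core.* is imported and the native library is loaded):

// Java code (illustrative sketch):
long start = Core.getTickCount();
// do something...
double seconds = (Core.getTickCount() - start) / Core.getTickFrequency();
System.out.println("elapsed: " + seconds + " s");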

hconcat

public static void hconcat(java.util.List<Mat> src,
                           Mat dst)

idct

public static void idct(Mat src,
                        Mat dst)

Calculates the inverse Discrete Cosine Transform of a 1D or 2D array.

idct(src, dst, flags) is equivalent to dct(src, dst, flags | DCT_INVERSE).

Parameters:
src - input floating-point single-channel array.
dst - output array of the same size and type as src.
See Also:
org.opencv.core.Core.idct, dft(org.opencv.core.Mat, org.opencv.core.Mat, int, int), dct(org.opencv.core.Mat, org.opencv.core.Mat, int), getOptimalDFTSize(int), idft(org.opencv.core.Mat, org.opencv.core.Mat, int, int)

idct

public static void idct(Mat src,
                        Mat dst,
                        int flags)

Calculates the inverse Discrete Cosine Transform of a 1D or 2D array.

idct(src, dst, flags) is equivalent to dct(src, dst, flags | DCT_INVERSE).

Parameters:
src - input floating-point single-channel array.
dst - output array of the same size and type as src.
flags - operation flags.
See Also:
org.opencv.core.Core.idct, dft(org.opencv.core.Mat, org.opencv.core.Mat, int, int), dct(org.opencv.core.Mat, org.opencv.core.Mat, int), getOptimalDFTSize(int), idft(org.opencv.core.Mat, org.opencv.core.Mat, int, int)

idft

public static void idft(Mat src,
                        Mat dst)

Calculates the inverse Discrete Fourier Transform of a 1D or 2D array.

idft(src, dst, flags) is equivalent to dft(src, dst, flags | DFT_INVERSE).

See "dft" for details.

Note: Neither dft nor idft scales the result by default. So, you should pass DFT_SCALE to one of dft or idft explicitly to make these transforms mutually inverse.

Parameters:
src - input floating-point real or complex array.
dst - output array whose size and type depend on the flags.
See Also:
org.opencv.core.Core.idft, dft(org.opencv.core.Mat, org.opencv.core.Mat, int, int), dct(org.opencv.core.Mat, org.opencv.core.Mat, int), getOptimalDFTSize(int), idct(org.opencv.core.Mat, org.opencv.core.Mat, int), mulSpectrums(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int, boolean)

idft

public static void idft(Mat src,
                        Mat dst,
                        int flags,
                        int nonzeroRows)

Calculates the inverse Discrete Fourier Transform of a 1D or 2D array.

idft(src, dst, flags) is equivalent to dft(src, dst, flags | DFT_INVERSE).

See "dft" for details.

Note: Neither dft nor idft scales the result by default. So, you should pass DFT_SCALE to one of dft or idft explicitly to make these transforms mutually inverse.

Parameters:
src - input floating-point real or complex array.
dst - output array whose size and type depend on the flags.
flags - operation flags (see "dft").
nonzeroRows - number of dst rows to process; the rest of the rows have undefined content (see the convolution sample in the "dft" description).
See Also:
org.opencv.core.Core.idft, dft(org.opencv.core.Mat, org.opencv.core.Mat, int, int), dct(org.opencv.core.Mat, org.opencv.core.Mat, int), getOptimalDFTSize(int), idct(org.opencv.core.Mat, org.opencv.core.Mat, int), mulSpectrums(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int, boolean)
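
A Java round-trip sketch: because neither dft nor idft scales the result, DFT_SCALE is passed to the inverse transform so that the original signal is recovered (illustrative only; assumes org.opencv.core.* is imported and the native library is loaded):

// Java code (illustrative sketch):
Mat signal = new Mat(1, 8, CvType.CV_32FC1);
signal.put(0, 0, 1, 2, 3, 4, 4, 3, 2, 1);
Mat spectrum = new Mat();
Mat restored = new Mat();
Core.dft(signal, spectrum, 0, 0);                                        // forward transform, CCS-packed output
Core.idft(spectrum, restored, Core.DFT_SCALE | Core.DFT_REAL_OUTPUT, 0); // inverse transform with scaling
System.out.println(restored.dump());   // ~ [1, 2, 3, 4, 4, 3, 2, 1]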

inRange

public static void inRange(Mat src,
                           Scalar lowerb,
                           Scalar upperb,
                           Mat dst)

Checks if array elements lie between the elements of two other arrays.

The function checks the range as follows:

  • For every element of a single-channel input array:

dst(I)= lowerb(I)_0 <= src(I)_0 <= upperb(I)_0

  • For two-channel arrays:

dst(I)= lowerb(I)_0 <= src(I)_0 <= upperb(I)_0 land lowerb(I)_1 <= src(I)_1 <= upperb(I)_1

  • and so forth.

That is, dst(I) is set to 255 (all 1-bits) if src(I) is within the specified 1D, 2D, 3D,... box and to 0 otherwise.

When the lower and/or upper boundary parameters are scalars, the indexes (I) at lowerb and upperb in the above formulas should be omitted.

Parameters:
src - first input array.
lowerb - inclusive lower boundary array or a scalar.
upperb - inclusive upper boundary array or a scalar.
dst - output array of the same size as src and CV_8U type.
See Also:
org.opencv.core.Core.inRange
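
For illustration, a Java sketch that builds a mask of "reddish" BGR pixels (illustrative only; the bounds are arbitrary; assumes org.opencv.core.* is imported and the native library is loaded):

// Java code (illustrative sketch):
Mat bgr = new Mat(1, 3, CvType.CV_8UC3);
bgr.put(0, 0, 20, 40, 200, 90, 90, 90, 30, 50, 180);   // three BGR pixels
Mat mask = new Mat();
// keep pixels with low blue/green and high red
Core.inRange(bgr, new Scalar(0, 0, 150), new Scalar(80, 80, 255), mask);
System.out.println(mask.dump());   // [255, 0, 255]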

insertChannel

public static void insertChannel(Mat src,
                                 Mat dst,
                                 int coi)

invert

public static double invert(Mat src,
                            Mat dst)

Finds the inverse or pseudo-inverse of a matrix.

The function invert inverts the matrix src and stores the result in dst. When the matrix src is singular or non-square, the function calculates the pseudo-inverse matrix (the dst matrix) so that norm(src*dst - I) is minimal, where I is an identity matrix.

In the case of the DECOMP_LU method, the function returns a non-zero value if the inverse has been successfully calculated and 0 if src is singular.

In case of the DECOMP_SVD method, the function returns the inverse condition number of src (the ratio of the smallest singular value to the largest singular value) and 0 if src is singular. The SVD method calculates a pseudo-inverse matrix if src is singular.

Similarly to DECOMP_LU, the method DECOMP_CHOLESKY works only with non-singular square matrices that should also be symmetric and positive definite. In this case, the function stores the inverted matrix in dst and returns non-zero. Otherwise, it returns 0.

Parameters:
src - input floating-point M x N matrix.
dst - output matrix of N x M size and the same type as src.
See Also:
org.opencv.core.Core.invert, solve(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int)

invert

public static double invert(Mat src,
                            Mat dst,
                            int flags)

Finds the inverse or pseudo-inverse of a matrix.

The function invert inverts the matrix src and stores the result in dst. When the matrix src is singular or non-square, the function calculates the pseudo-inverse matrix (the dst matrix) so that norm(src*dst - I) is minimal, where I is an identity matrix.

In the case of the DECOMP_LU method, the function returns a non-zero value if the inverse has been successfully calculated and 0 if src is singular.

In case of the DECOMP_SVD method, the function returns the inverse condition number of src (the ratio of the smallest singular value to the largest singular value) and 0 if src is singular. The SVD method calculates a pseudo-inverse matrix if src is singular.

Similarly to DECOMP_LU, the method DECOMP_CHOLESKY works only with non-singular square matrices that should also be symmetric and positive definite. In this case, the function stores the inverted matrix in dst and returns non-zero. Otherwise, it returns 0.

Parameters:
src - input floating-point M x N matrix.
dst - output matrix of N x M size and the same type as src.
flags - inversion method :
  • DECOMP_LU Gaussian elimination with the optimal pivot element chosen.
  • DECOMP_SVD singular value decomposition (SVD) method.
  • DECOMP_CHOLESKY Cholesky decomposition; the matrix must be symmetric and positive definite.
See Also:
org.opencv.core.Core.invert, solve(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int)
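
A short Java sketch inverting a small non-singular matrix (illustrative only; assumes org.opencv.core.* is imported and the native library is loaded):

// Java code (illustrative sketch):
Mat m = new Mat(2, 2, CvType.CV_32FC1);
m.put(0, 0, 4, 7, 2, 6);                       // determinant = 10
Mat inv = new Mat();
double ret = Core.invert(m, inv, Core.DECOMP_LU);
// ret is non-zero because m is non-singular; inv ~ [[0.6, -0.7], [-0.2, 0.4]]
System.out.println(ret + "\n" + inv.dump());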

kmeans

public static double kmeans(Mat data,
                            int K,
                            Mat bestLabels,
                            TermCriteria criteria,
                            int attempts,
                            int flags)

Finds centers of clusters and groups input samples around the clusters.

The function kmeans implements a k-means algorithm that finds the centers of cluster_count clusters and groups the input samples around the clusters. As an output, labels_i contains a 0-based cluster index for the sample stored in the i^(th) row of the samples matrix.

The function returns the compactness measure that is computed as

sum_i ||samples_i - centers_(labels_i)||^2

after every attempt. The best (minimum) value is chosen and the corresponding labels and the compactness value are returned by the function. Basically, you can use only the core of the function, set the number of attempts to 1, initialize labels each time using a custom algorithm, pass them with the (flags = KMEANS_USE_INITIAL_LABELS) flag, and then choose the best (most-compact) clustering.

Note:

  • An example on K-means clustering can be found at opencv_source_code/samples/cpp/kmeans.cpp
  • (Python) An example on K-means clustering can be found at opencv_source_code/samples/python2/kmeans.py

Parameters:
data - Data for clustering.
K - Number of clusters to split the set by.
bestLabels - input/output integer array that stores the cluster indices for every sample.
criteria - The algorithm termination criteria, that is, the maximum number of iterations and/or the desired accuracy. The accuracy is specified as criteria.epsilon. As soon as each of the cluster centers moves by less than criteria.epsilon on some iteration, the algorithm stops.
attempts - Number of times the algorithm is executed using different initial labellings. The algorithm returns the labels that yield the best compactness (see the last function parameter).
flags - Flag that can take the following values:
  • KMEANS_RANDOM_CENTERS Select random initial centers in each attempt.
  • KMEANS_PP_CENTERS Use kmeans++ center initialization by Arthur and Vassilvitskii [Arthur2007].
  • KMEANS_USE_INITIAL_LABELS During the first (and possibly the only) attempt, use the user-supplied labels instead of computing them from the initial centers. For the second and further attempts, use the random or semi-random centers. Use one of the KMEANS_*_CENTERS flags to specify the exact method.
See Also:
org.opencv.core.Core.kmeans

kmeans

public static double kmeans(Mat data,
                            int K,
                            Mat bestLabels,
                            TermCriteria criteria,
                            int attempts,
                            int flags,
                            Mat centers)

Finds centers of clusters and groups input samples around the clusters.

The function kmeans implements a k-means algorithm that finds the centers of cluster_count clusters and groups the input samples around the clusters. As an output, labels_i contains a 0-based cluster index for the sample stored in the i^(th) row of the samples matrix.

The function returns the compactness measure that is computed as

sum_i ||samples_i - centers_(labels_i)||^2

after every attempt. The best (minimum) value is chosen and the corresponding labels and the compactness value are returned by the function. Basically, you can use only the core of the function, set the number of attempts to 1, initialize labels each time using a custom algorithm, pass them with the (flags = KMEANS_USE_INITIAL_LABELS) flag, and then choose the best (most-compact) clustering.

Note:

  • An example on K-means clustering can be found at opencv_source_code/samples/cpp/kmeans.cpp
  • (Python) An example on K-means clustering can be found at opencv_source_code/samples/python2/kmeans.py

Parameters:
data - Data for clustering.
K - Number of clusters to split the set by.
bestLabels - input/output integer array that stores the cluster indices for every sample.
criteria - The algorithm termination criteria, that is, the maximum number of iterations and/or the desired accuracy. The accuracy is specified as criteria.epsilon. As soon as each of the cluster centers moves by less than criteria.epsilon on some iteration, the algorithm stops.
attempts - Number of times the algorithm is executed using different initial labellings. The algorithm returns the labels that yield the best compactness (see the last function parameter).
flags - Flag that can take the following values:
  • KMEANS_RANDOM_CENTERS Select random initial centers in each attempt.
  • KMEANS_PP_CENTERS Use kmeans++ center initialization by Arthur and Vassilvitskii [Arthur2007].
  • KMEANS_USE_INITIAL_LABELS During the first (and possibly the only) attempt, use the user-supplied labels instead of computing them from the initial centers. For the second and further attempts, use the random or semi-random centers. Use one of the KMEANS_*_CENTERS flags to specify the exact method.
centers - Output matrix of the cluster centers, one row per each cluster center.
See Also:
org.opencv.core.Core.kmeans
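
A small Java sketch clustering six 2D points into two clusters (illustrative only; the sample values are arbitrary; assumes org.opencv.core.* is imported and the native library is loaded):

// Java code (illustrative sketch):
Mat data = new Mat(6, 2, CvType.CV_32FC1);
data.put(0, 0,
        1.0, 1.0,  1.2, 0.8,  0.8, 1.1,     // points near (1, 1)
        9.0, 9.0,  9.2, 8.8,  8.9, 9.1);    // points near (9, 9)
Mat labels = new Mat();
Mat centers = new Mat();
TermCriteria criteria = new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 100, 1e-4);
double compactness = Core.kmeans(data, 2, labels, criteria, 3, Core.KMEANS_PP_CENTERS, centers);
System.out.println(labels.dump() + "\n" + centers.dump() + "\ncompactness = " + compactness);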

line

public static void line(Mat img,
                        Point pt1,
                        Point pt2,
                        Scalar color)

Draws a line segment connecting two points.

The function line draws the line segment between pt1 and pt2 points in the image. The line is clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounded endings. Antialiased lines are drawn using Gaussian filtering. To specify the line color, you may use the macro CV_RGB(r, g, b).

Parameters:
img - Image.
pt1 - First point of the line segment.
pt2 - Second point of the line segment.
color - Line color.
See Also:
org.opencv.core.Core.line

line

public static void line(Mat img,
                        Point pt1,
                        Point pt2,
                        Scalar color,
                        int thickness)

Draws a line segment connecting two points.

The function line draws the line segment between pt1 and pt2 points in the image. The line is clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounded endings. Antialiased lines are drawn using Gaussian filtering. To specify the line color, you may use the macro CV_RGB(r, g, b).

Parameters:
img - Image.
pt1 - First point of the line segment.
pt2 - Second point of the line segment.
color - Line color.
thickness - Line thickness.
See Also:
org.opencv.core.Core.line

line

public static void line(Mat img,
                        Point pt1,
                        Point pt2,
                        Scalar color,
                        int thickness,
                        int lineType,
                        int shift)

Draws a line segment connecting two points.

The function line draws the line segment between pt1 and pt2 points in the image. The line is clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounded endings. Antialiased lines are drawn using Gaussian filtering. To specify the line color, you may use the macro CV_RGB(r, g, b).

Parameters:
img - Image.
pt1 - First point of the line segment.
pt2 - Second point of the line segment.
color - Line color.
thickness - Line thickness.
lineType - Type of the line:
  • 8 (or omitted) - 8-connected line.
  • 4 - 4-connected line.
  • CV_AA - antialiased line.
shift - Number of fractional bits in the point coordinates.
See Also:
org.opencv.core.Core.line

log

public static void log(Mat src,
                       Mat dst)

Calculates the natural logarithm of every array element.

The function log calculates the natural logarithm of the absolute value of every element of the input array:

dst(I) = log|src(I)| if src(I) != 0 ; C otherwise

where C is a large negative number (about -700 in the current implementation). The maximum relative error is about 7e-6 for single-precision input and less than 1e-10 for double-precision input. Special values (NaN, Inf) are not handled.

Parameters:
src - input array.
dst - output array of the same size and type as src.
See Also:
org.opencv.core.Core.log, cartToPolar(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, boolean), pow(org.opencv.core.Mat, double, org.opencv.core.Mat), sqrt(org.opencv.core.Mat, org.opencv.core.Mat), magnitude(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), polarToCart(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, boolean), exp(org.opencv.core.Mat, org.opencv.core.Mat), phase(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, boolean)

LUT

public static void LUT(Mat src,
                       Mat lut,
                       Mat dst)

Performs a look-up table transform of an array.

The function LUT fills the output array with values from the look-up table. Indices of the entries are taken from the input array. That is, the function processes each element of src as follows:

dst(I) <- lut(src(I) + d)

where

d = 0 if src has depth CV_8U; 128 if src has depth CV_8S

Parameters:
src - input array of 8-bit elements.
lut - look-up table of 256 elements; in case of multi-channel input array, the table should either have a single channel (in this case the same table is used for all channels) or the same number of channels as in the input array.
dst - output array of the same size and number of channels as src, and the same depth as lut.
See Also:
org.opencv.core.Core.LUT, Mat.convertTo(org.opencv.core.Mat, int, double, double), convertScaleAbs(org.opencv.core.Mat, org.opencv.core.Mat, double, double)
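
For illustration, a Java sketch that inverts an 8-bit image through a 256-element table (illustrative only; assumes org.opencv.core.* is imported and the native library is loaded):

// Java code (illustrative sketch):
Mat lut = new Mat(1, 256, CvType.CV_8UC1);
byte[] table = new byte[256];
for (int i = 0; i < 256; i++) {
    table[i] = (byte) (255 - i);   // value inversion
}
lut.put(0, 0, table);
Mat src = new Mat(2, 2, CvType.CV_8UC1, new Scalar(10));
Mat dst = new Mat();
Core.LUT(src, lut, dst);           // every 10 becomes 245
System.out.println(dst.dump());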

LUT

public static void LUT(Mat src,
                       Mat lut,
                       Mat dst,
                       int interpolation)

Performs a look-up table transform of an array.

The function LUT fills the output array with values from the look-up table. Indices of the entries are taken from the input array. That is, the function processes each element of src as follows:

dst(I) <- lut(src(I) + d)

where

d = 0 if src has depth CV_8U; 128 if src has depth CV_8S

Parameters:
src - input array of 8-bit elements.
lut - look-up table of 256 elements; in case of multi-channel input array, the table should either have a single channel (in this case the same table is used for all channels) or the same number of channels as in the input array.
dst - output array of the same size and number of channels as src, and the same depth as lut.
interpolation - interpolation flag; not used by the current implementation.
See Also:
org.opencv.core.Core.LUT, Mat.convertTo(org.opencv.core.Mat, int, double, double), convertScaleAbs(org.opencv.core.Mat, org.opencv.core.Mat, double, double)

magnitude

public static void magnitude(Mat x,
                             Mat y,
                             Mat magnitude)

Calculates the magnitude of 2D vectors.

The function magnitude calculates the magnitude of 2D vectors formed from the corresponding elements of x and y arrays:

dst(I) = sqrt(x(I)^2 + y(I)^2)

Parameters:
x - floating-point array of x-coordinates of the vectors.
y - floating-point array of y-coordinates of the vectors; it must have the same size as x.
magnitude - output array of the same size and type as x.
See Also:
org.opencv.core.Core.magnitude, cartToPolar(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, boolean), phase(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, boolean), sqrt(org.opencv.core.Mat, org.opencv.core.Mat), polarToCart(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, boolean)

Mahalanobis

public static double Mahalanobis(Mat v1,
                                 Mat v2,
                                 Mat icovar)

Calculates the Mahalanobis distance between two vectors.

The function Mahalanobis calculates and returns the weighted distance between two vectors:

d(v1,v2) = sqrt(sum_(i,j)(icovar(i,j)*(v1(i)-v2(i))*(v1(j)-v2(j))))

The covariance matrix may be calculated using the "calcCovarMatrix" function and then inverted using the "invert" function (preferably using the DECOMP_SVD method, as the most accurate).

Parameters:
v1 - first 1D input vector.
v2 - second 1D input vector.
icovar - inverse covariance matrix.
See Also:
org.opencv.core.Core.Mahalanobis

max

public static void max(Mat src1,
                       Mat src2,
                       Mat dst)

Calculates per-element maximum of two arrays or an array and a scalar.

The functions max calculate the per-element maximum of two arrays:

dst(I)= max(src1(I), src2(I))

or array and a scalar:

dst(I)= max(src1(I), value)

In the second variant, when the input array is multi-channel, each channel is compared with value independently.

The first 3 variants of the function listed above are actually a part of "MatrixExpressions". They return an expression object that can be further either transformed/ assigned to a matrix, or passed to a function, and so on.

Parameters:
src1 - first input array.
src2 - second input array of the same size and type as src1.
dst - output array of the same size and type as src1.
See Also:
org.opencv.core.Core.max, compare(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), inRange(org.opencv.core.Mat, org.opencv.core.Scalar, org.opencv.core.Scalar, org.opencv.core.Mat), minMaxLoc(org.opencv.core.Mat, org.opencv.core.Mat), min(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat)

max

public static void max(Mat src1,
                       Scalar src2,
                       Mat dst)

Calculates per-element maximum of two arrays or an array and a scalar.

The functions max calculate the per-element maximum of two arrays:

dst(I)= max(src1(I), src2(I))

or array and a scalar:

dst(I)= max(src1(I), value)

In the second variant, when the input array is multi-channel, each channel is compared with value independently.

The first 3 variants of the function listed above are actually a part of "MatrixExpressions". They return an expression object that can be further either transformed/ assigned to a matrix, or passed to a function, and so on.

Parameters:
src1 - first input array.
src2 - second input array of the same size and type as src1.
dst - output array of the same size and type as src1.
See Also:
org.opencv.core.Core.max, compare(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), inRange(org.opencv.core.Mat, org.opencv.core.Scalar, org.opencv.core.Scalar, org.opencv.core.Mat), minMaxLoc(org.opencv.core.Mat, org.opencv.core.Mat), min(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat)

mean

public static Scalar mean(Mat src)

Calculates an average (mean) of array elements.

The function mean calculates the mean value M of array elements, independently for each channel, and returns it:

N = sum_{I: mask(I) != 0} 1
M_c = (sum_{I: mask(I) != 0} mtx(I)_c) / N

When all the mask elements are 0's, the functions return Scalar.all(0).

Parameters:
src - input array that should have from 1 to 4 channels so that the result can be stored in "Scalar_".
See Also:
org.opencv.core.Core.mean, countNonZero(org.opencv.core.Mat), meanStdDev(org.opencv.core.Mat, org.opencv.core.MatOfDouble, org.opencv.core.MatOfDouble, org.opencv.core.Mat), norm(org.opencv.core.Mat, int, org.opencv.core.Mat), minMaxLoc(org.opencv.core.Mat, org.opencv.core.Mat)

mean

public static Scalar mean(Mat src,
                          Mat mask)

Calculates an average (mean) of array elements.

The function mean calculates the mean value M of array elements, independently for each channel, and returns it:

N = sum_{I: mask(I) != 0} 1
M_c = (sum_{I: mask(I) != 0} mtx(I)_c) / N

When all the mask elements are 0's, the functions return Scalar.all(0).

Parameters:
src - input array that should have from 1 to 4 channels so that the result can be stored in "Scalar_".
mask - optional operation mask.
See Also:
org.opencv.core.Core.mean, countNonZero(org.opencv.core.Mat), meanStdDev(org.opencv.core.Mat, org.opencv.core.MatOfDouble, org.opencv.core.MatOfDouble, org.opencv.core.Mat), norm(org.opencv.core.Mat, int, org.opencv.core.Mat), minMaxLoc(org.opencv.core.Mat, org.opencv.core.Mat)

meanStdDev

public static void meanStdDev(Mat src,
                              MatOfDouble mean,
                              MatOfDouble stddev)

Calculates a mean and standard deviation of array elements.

The function meanStdDev calculates the mean and the standard deviation of array elements independently for each channel and returns them via the output parameters:

N = sum_{I: mask(I) != 0} 1
mean_c = (sum_{I: mask(I) != 0} src(I)_c) / N
stddev_c = sqrt((sum_{I: mask(I) != 0} (src(I)_c - mean_c)^2) / N)

When all the mask elements are 0's, the functions return mean=stddev=Scalar.all(0).

Note: The calculated standard deviation is only the diagonal of the complete normalized covariance matrix. If the full matrix is needed, you can reshape the multi-channel array M x N to the single-channel array M*N x mtx.channels() (only possible when the matrix is continuous) and then pass the matrix to "calcCovarMatrix".

Parameters:
src - input array that should have from 1 to 4 channels so that the results can be stored in "Scalar_" 's.
mean - output parameter: calculated mean value.
stddev - output parameter: calculated standard deviation.
See Also:
org.opencv.core.Core.meanStdDev, countNonZero(org.opencv.core.Mat), calcCovarMatrix(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int, int), minMaxLoc(org.opencv.core.Mat, org.opencv.core.Mat), norm(org.opencv.core.Mat, int, org.opencv.core.Mat), mean(org.opencv.core.Mat, org.opencv.core.Mat)

meanStdDev

public static void meanStdDev(Mat src,
                              MatOfDouble mean,
                              MatOfDouble stddev,
                              Mat mask)

Calculates a mean and standard deviation of array elements.

The function meanStdDev calculates the mean and the standard deviation of array elements independently for each channel and returns them via the output parameters:

N = sum_{I: mask(I) != 0} 1
mean_c = (sum_{I: mask(I) != 0} src(I)_c) / N
stddev_c = sqrt((sum_{I: mask(I) != 0} (src(I)_c - mean_c)^2) / N)

When all the mask elements are 0's, the functions return mean=stddev=Scalar.all(0).

Note: The calculated standard deviation is only the diagonal of the complete normalized covariance matrix. If the full matrix is needed, you can reshape the multi-channel array M x N to the single-channel array M*N x mtx.channels() (only possible when the matrix is continuous) and then pass the matrix to "calcCovarMatrix".

Parameters:
src - input array that should have from 1 to 4 channels so that the results can be stored in "Scalar_" 's.
mean - output parameter: calculated mean value.
stddev - output parameter: calculated standard deviation.
mask - optional operation mask.
See Also:
org.opencv.core.Core.meanStdDev, countNonZero(org.opencv.core.Mat), calcCovarMatrix(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int, int), minMaxLoc(org.opencv.core.Mat, org.opencv.core.Mat), norm(org.opencv.core.Mat, int, org.opencv.core.Mat), mean(org.opencv.core.Mat, org.opencv.core.Mat)
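
A minimal Java sketch (illustrative only; assumes org.opencv.core.* is imported and the native library is loaded):

// Java code (illustrative sketch):
Mat img = new Mat(2, 2, CvType.CV_8UC1);
img.put(0, 0, 10, 20, 30, 40);
MatOfDouble mean = new MatOfDouble();
MatOfDouble stddev = new MatOfDouble();
Core.meanStdDev(img, mean, stddev);
// mean = 25.0, stddev = sqrt(125) ~ 11.18
System.out.println("mean = " + mean.toArray()[0] + ", stddev = " + stddev.toArray()[0]);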

merge

public static void merge(java.util.List<Mat> mv,
                         Mat dst)

Creates one multichannel array out of several single-channel ones.

The functions merge merge several arrays to make a single multi-channel array. That is, each element of the output array will be a concatenation of the elements of the input arrays, where elements of the i-th input array are treated as mv[i].channels()-element vectors.

The function "split" does the reverse operation. If you need to shuffle channels in some other advanced way, use "mixChannels".

Parameters:
mv - input array or vector of matrices to be merged; all the matrices in mv must have the same size and the same depth.
dst - output array of the same size and the same depth as mv[0]; The number of channels will be the total number of channels in the matrix array.
See Also:
org.opencv.core.Core.merge, Mat.reshape(int, int), mixChannels(java.util.List, java.util.List, org.opencv.core.MatOfInt), split(org.opencv.core.Mat, java.util.List)

min

public static void min(Mat src1,
                       Mat src2,
                       Mat dst)

Calculates per-element minimum of two arrays or an array and a scalar.

The functions min calculate the per-element minimum of two arrays:

dst(I)= min(src1(I), src2(I))

or array and a scalar:

dst(I)= min(src1(I), value)

In the second variant, when the input array is multi-channel, each channel is compared with value independently.

The first three variants of the function listed above are actually a part of "MatrixExpressions". They return the expression object that can be further either transformed/assigned to a matrix, or passed to a function, and so on.

Parameters:
src1 - first input array.
src2 - second input array of the same size and type as src1.
dst - output array of the same size and type as src1.
See Also:
org.opencv.core.Core.min, max(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), compare(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), inRange(org.opencv.core.Mat, org.opencv.core.Scalar, org.opencv.core.Scalar, org.opencv.core.Mat), minMaxLoc(org.opencv.core.Mat, org.opencv.core.Mat)

min

public static void min(Mat src1,
                       Scalar src2,
                       Mat dst)

Calculates per-element minimum of two arrays or an array and a scalar.

The functions min calculate the per-element minimum of two arrays:

dst(I)= min(src1(I), src2(I))

or array and a scalar:

dst(I)= min(src1(I), value)

In the second variant, when the input array is multi-channel, each channel is compared with value independently.

The first three variants of the function listed above are actually a part of "MatrixExpressions". They return the expression object that can be further either transformed/assigned to a matrix, or passed to a function, and so on.

Parameters:
src1 - first input array.
src2 - second input array of the same size and type as src1.
dst - output array of the same size and type as src1.
See Also:
org.opencv.core.Core.min, max(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), compare(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), inRange(org.opencv.core.Mat, org.opencv.core.Scalar, org.opencv.core.Scalar, org.opencv.core.Mat), minMaxLoc(org.opencv.core.Mat, org.opencv.core.Mat)

minMaxLoc

public static Core.MinMaxLocResult minMaxLoc(Mat src)

Finds the global minimum and maximum in an array.

The functions minMaxLoc find the minimum and maximum element values and their positions. The extrema are searched for across the whole array or, if mask is not an empty array, in the specified array region.

The functions do not work with multi-channel arrays. If you need to find minimum or maximum elements across all the channels, use "Mat.reshape" first to reinterpret the array as single-channel. Or you may extract the particular channel using either "extractImageCOI", or "mixChannels", or "split".

Parameters:
src - input single-channel array.
See Also:
org.opencv.core.Core.minMaxLoc, compare(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), min(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), mixChannels(java.util.List, java.util.List, org.opencv.core.MatOfInt), Mat.reshape(int, int), split(org.opencv.core.Mat, java.util.List), max(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), inRange(org.opencv.core.Mat, org.opencv.core.Scalar, org.opencv.core.Scalar, org.opencv.core.Mat)

minMaxLoc

public static Core.MinMaxLocResult minMaxLoc(Mat src,
                                             Mat mask)

Finds the global minimum and maximum in an array.

The functions minMaxLoc find the minimum and maximum element values and their positions. The extrema are searched for across the whole array or, if mask is not an empty array, in the specified array region.

The functions do not work with multi-channel arrays. If you need to find minimum or maximum elements across all the channels, use "Mat.reshape" first to reinterpret the array as single-channel. Or you may extract the particular channel using either "extractImageCOI", or "mixChannels", or "split".

Parameters:
src - input single-channel array.
mask - optional mask used to select a sub-array.
See Also:
org.opencv.core.Core.minMaxLoc, compare(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), min(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), mixChannels(java.util.List, java.util.List, org.opencv.core.MatOfInt), Mat.reshape(int, int), split(org.opencv.core.Mat, java.util.List), max(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), inRange(org.opencv.core.Mat, org.opencv.core.Scalar, org.opencv.core.Scalar, org.opencv.core.Mat)
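
For illustration, a Java sketch reading the result structure (illustrative only; assumes org.opencv.core.* is imported and the native library is loaded):

// Java code (illustrative sketch):
Mat src = new Mat(2, 3, CvType.CV_32FC1);
src.put(0, 0, 3, 7, -1, 0, 12, 5);
Core.MinMaxLocResult res = Core.minMaxLoc(src);
// minVal = -1 at (2, 0), maxVal = 12 at (1, 1) -- Point(x, y) = (col, row)
System.out.println("min " + res.minVal + " at " + res.minLoc + ", max " + res.maxVal + " at " + res.maxLoc);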

mixChannels

public static void mixChannels(java.util.List<Mat> src,
                               java.util.List<Mat> dst,
                               MatOfInt fromTo)

Copies specified channels from input arrays to the specified channels of output arrays.

The functions mixChannels provide an advanced mechanism for shuffling image channels.

"split" and "merge" and some forms of "cvtColor" are partial cases of mixChannels. In the example below, the code splits a 4-channel RGBA image into a 3-channel BGR (with R and B channels swapped) and a separate alpha-channel image:

// C++ code:

Mat rgba(100, 100, CV_8UC4, Scalar(1,2,3,4));
Mat bgr(rgba.rows, rgba.cols, CV_8UC3);
Mat alpha(rgba.rows, rgba.cols, CV_8UC1);

// forming an array of matrices is a quite efficient operation,
// because the matrix data is not copied, only the headers
Mat out[] = { bgr, alpha };

// rgba[0] -> bgr[2], rgba[1] -> bgr[1],
// rgba[2] -> bgr[0], rgba[3] -> alpha[0]
int from_to[] = { 0,2, 1,1, 2,0, 3,3 };

mixChannels(&rgba, 1, out, 2, from_to, 4);
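A rough Java equivalent of the snippet above (a sketch; assumes java.util.Arrays and the OpenCV classes are imported):

// Java code:

Mat rgba = new Mat(100, 100, CvType.CV_8UC4, new Scalar(1, 2, 3, 4));
Mat bgr = new Mat(rgba.rows(), rgba.cols(), CvType.CV_8UC3);
Mat alpha = new Mat(rgba.rows(), rgba.cols(), CvType.CV_8UC1);
// rgba[0] -> bgr[2], rgba[1] -> bgr[1], rgba[2] -> bgr[0], rgba[3] -> alpha[0]
MatOfInt fromTo = new MatOfInt(0, 2, 1, 1, 2, 0, 3, 3);
Core.mixChannels(Arrays.asList(rgba), Arrays.asList(bgr, alpha), fromTo);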

Note: Unlike many other new-style C++ functions in OpenCV (see the introduction section and "Mat.create"), mixChannels requires the output arrays to be pre-allocated before calling the function.

Parameters:
src - input array or vector of matrices; all of the matrices must have the same size and the same depth.
dst - output array or vector of matrices; all the matrices *must be allocated*; their size and depth must be the same as in src[0].
fromTo - array of index pairs specifying which channels are copied and where; fromTo[k*2] is a 0-based index of the input channel in src, fromTo[k*2+1] is an index of the output channel in dst; the continuous channel numbering is used: the first input image channels are indexed from 0 to src[0].channels()-1, the second input image channels are indexed from src[0].channels() to src[0].channels() + src[1].channels()-1, and so on, the same scheme is used for the output image channels; as a special case, when fromTo[k*2] is negative, the corresponding output channel is filled with zero.
See Also:
org.opencv.core.Core.mixChannels, merge(java.util.List, org.opencv.core.Mat), split(org.opencv.core.Mat, java.util.List), Imgproc.cvtColor(org.opencv.core.Mat, org.opencv.core.Mat, int, int)

mulSpectrums

public static void mulSpectrums(Mat a,
                                Mat b,
                                Mat c,
                                int flags)

Performs the per-element multiplication of two Fourier spectrums.

The function mulSpectrums performs the per-element multiplication of the two CCS-packed or complex matrices that are results of a real or complex Fourier transform.

The function, together with "dft" and "idft", may be used to calculate convolution (pass conjB=false) or correlation (pass conjB=true) of two arrays rapidly. When the arrays are complex, they are simply multiplied (per element) with an optional conjugation of the second-array elements. When the arrays are real, they are assumed to be CCS-packed (see "dft" for details).

Parameters:
a - first input array (a CCS-packed or complex Fourier spectrum).
b - second input array of the same size and type as the first.
c - output array of the same size and type as the inputs.
flags - operation flags; currently, the only supported flag is DFT_ROWS, which indicates that each row of src1 and src2 is an independent 1D Fourier spectrum.
See Also:
org.opencv.core.Core.mulSpectrums

mulSpectrums

public static void mulSpectrums(Mat a,
                                Mat b,
                                Mat c,
                                int flags,
                                boolean conjB)

Performs the per-element multiplication of two Fourier spectrums.

The function mulSpectrums performs the per-element multiplication of the two CCS-packed or complex matrices that are results of a real or complex Fourier transform.

The function, together with "dft" and "idft", may be used to calculate convolution (pass conjB=false) or correlation (pass conjB=true) of two arrays rapidly. When the arrays are complex, they are simply multiplied (per element) with an optional conjugation of the second-array elements. When the arrays are real, they are assumed to be CCS-packed (see "dft" for details).
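For instance, a hedged Java sketch of fast correlation of two equally sized single-channel floating-point matrices a and b (names illustrative; assumes the dft/idft functions of this class):

// Java code:

Mat A = new Mat(), B = new Mat(), C = new Mat(), corr = new Mat();
Core.dft(a, A); // forward transforms (CCS-packed for real input)
Core.dft(b, B);
Core.mulSpectrums(A, B, C, 0, true); // conjB=true -> correlation
Core.idft(C, corr, Core.DFT_SCALE | Core.DFT_REAL_OUTPUT, 0);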

Parameters:
a - first input array (a CCS-packed or complex Fourier spectrum).
b - second input array of the same size and type as the first.
c - output array of the same size and type as the inputs.
flags - operation flags; currently, the only supported flag is DFT_ROWS, which indicates that each row of src1 and src2 is an independent 1D Fourier spectrum.
conjB - optional flag that conjugates the second input array before the multiplication (true) or not (false).
See Also:
org.opencv.core.Core.mulSpectrums

multiply

public static void multiply(Mat src1,
                            Mat src2,
                            Mat dst)

Calculates the per-element scaled product of two arrays.

The function multiply calculates the per-element product of two arrays:

dst(I)= saturate(scale * src1(I) * src2(I))

There is also a "MatrixExpressions" -friendly variant of the first function. See "Mat.mul".

For a not-per-element matrix product, see "gemm".

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result of an incorrect sign in the case of overflow.
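A short Java sketch (img1 and img2 are illustrative 8-bit images of the same size and type):

// Java code:

Mat prod = new Mat();
Core.multiply(img1, img2, prod, 1.0 / 255); // the scale factor keeps 8-bit values in range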

Parameters:
src1 - first input array.
src2 - second input array of the same size and the same type as src1.
dst - output array of the same size and type as src1.
See Also:
org.opencv.core.Core.multiply, divide(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, double, int), Mat.convertTo(org.opencv.core.Mat, int, double, double), addWeighted(org.opencv.core.Mat, double, org.opencv.core.Mat, double, double, org.opencv.core.Mat, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), Imgproc.accumulateSquare(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), Imgproc.accumulate(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), scaleAdd(org.opencv.core.Mat, double, org.opencv.core.Mat, org.opencv.core.Mat), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), Imgproc.accumulateProduct(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat)

multiply

public static void multiply(Mat src1,
                            Mat src2,
                            Mat dst,
                            double scale)

Calculates the per-element scaled product of two arrays.

The function multiply calculates the per-element product of two arrays:

dst(I)= saturate(scale * src1(I) * src2(I))

There is also a "MatrixExpressions" -friendly variant of the first function. See "Mat.mul".

For a not-per-element matrix product, see "gemm".

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result of an incorrect sign in the case of overflow.

Parameters:
src1 - first input array.
src2 - second input array of the same size and the same type as src1.
dst - output array of the same size and type as src1.
scale - optional scale factor.
See Also:
org.opencv.core.Core.multiply, divide(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, double, int), Mat.convertTo(org.opencv.core.Mat, int, double, double), addWeighted(org.opencv.core.Mat, double, org.opencv.core.Mat, double, double, org.opencv.core.Mat, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), Imgproc.accumulateSquare(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), Imgproc.accumulate(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), scaleAdd(org.opencv.core.Mat, double, org.opencv.core.Mat, org.opencv.core.Mat), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), Imgproc.accumulateProduct(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat)

multiply

public static void multiply(Mat src1,
                            Mat src2,
                            Mat dst,
                            double scale,
                            int dtype)

Calculates the per-element scaled product of two arrays.

The function multiply calculates the per-element product of two arrays:

dst(I)= saturate(scale * src1(I) * src2(I))

There is also a "MatrixExpressions" -friendly variant of the first function. See "Mat.mul".

For a not-per-element matrix product, see "gemm".

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result of an incorrect sign in the case of overflow.

Parameters:
src1 - first input array.
src2 - second input array of the same size and the same type as src1.
dst - output array of the same size and type as src1.
scale - optional scale factor.
dtype - optional depth of the output array.
See Also:
org.opencv.core.Core.multiply, divide(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, double, int), Mat.convertTo(org.opencv.core.Mat, int, double, double), addWeighted(org.opencv.core.Mat, double, org.opencv.core.Mat, double, double, org.opencv.core.Mat, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), Imgproc.accumulateSquare(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), Imgproc.accumulate(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), scaleAdd(org.opencv.core.Mat, double, org.opencv.core.Mat, org.opencv.core.Mat), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), Imgproc.accumulateProduct(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat)

multiply

public static void multiply(Mat src1,
                            Scalar src2,
                            Mat dst)

Calculates the per-element scaled product of two arrays.

The function multiply calculates the per-element product of two arrays:

dst(I)= saturate(scale * src1(I) * src2(I))

There is also a "MatrixExpressions" -friendly variant of the first function. See "Mat.mul".

For a not-per-element matrix product, see "gemm".

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result of an incorrect sign in the case of overflow.

Parameters:
src1 - first input array.
src2 - scalar second operand, multiplied element-wise with src1.
dst - output array of the same size and type as src1.
See Also:
org.opencv.core.Core.multiply, divide(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, double, int), Mat.convertTo(org.opencv.core.Mat, int, double, double), addWeighted(org.opencv.core.Mat, double, org.opencv.core.Mat, double, double, org.opencv.core.Mat, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), Imgproc.accumulateSquare(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), Imgproc.accumulate(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), scaleAdd(org.opencv.core.Mat, double, org.opencv.core.Mat, org.opencv.core.Mat), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), Imgproc.accumulateProduct(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat)

multiply

public static void multiply(Mat src1,
                            Scalar src2,
                            Mat dst,
                            double scale)

Calculates the per-element scaled product of two arrays.

The function multiply calculates the per-element product of two arrays:

dst(I)= saturate(scale * src1(I) * src2(I))

There is also a "MatrixExpressions" -friendly variant of the first function. See "Mat.mul".

For a not-per-element matrix product, see "gemm".

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result of an incorrect sign in the case of overflow.

Parameters:
src1 - first input array.
src2 - scalar second operand, multiplied element-wise with src1.
dst - output array of the same size and type as src1.
scale - optional scale factor.
See Also:
org.opencv.core.Core.multiply, divide(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, double, int), Mat.convertTo(org.opencv.core.Mat, int, double, double), addWeighted(org.opencv.core.Mat, double, org.opencv.core.Mat, double, double, org.opencv.core.Mat, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), Imgproc.accumulateSquare(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), Imgproc.accumulate(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), scaleAdd(org.opencv.core.Mat, double, org.opencv.core.Mat, org.opencv.core.Mat), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), Imgproc.accumulateProduct(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat)

multiply

public static void multiply(Mat src1,
                            Scalar src2,
                            Mat dst,
                            double scale,
                            int dtype)

Calculates the per-element scaled product of two arrays.

The function multiply calculates the per-element product of two arrays:

dst(I)= saturate(scale * src1(I) * src2(I))

There is also a "MatrixExpressions" -friendly variant of the first function. See "Mat.mul".

For a not-per-element matrix product, see "gemm".

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result of an incorrect sign in the case of overflow.

Parameters:
src1 - first input array.
src2 - scalar second operand, multiplied element-wise with src1.
dst - output array of the same size and type as src1.
scale - optional scale factor.
dtype - optional depth of the output array.
See Also:
org.opencv.core.Core.multiply, divide(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, double, int), Mat.convertTo(org.opencv.core.Mat, int, double, double), addWeighted(org.opencv.core.Mat, double, org.opencv.core.Mat, double, double, org.opencv.core.Mat, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), Imgproc.accumulateSquare(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), Imgproc.accumulate(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), scaleAdd(org.opencv.core.Mat, double, org.opencv.core.Mat, org.opencv.core.Mat), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), Imgproc.accumulateProduct(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat)

mulTransposed

public static void mulTransposed(Mat src,
                                 Mat dst,
                                 boolean aTa)

Calculates the product of a matrix and its transposition.

The function mulTransposed calculates the product of src and its transposition:

dst = scale(src - delta)^T(src - delta)

if aTa=true, and

dst = scale(src - delta)(src - delta)^T

otherwise. The function is used to calculate the covariance matrix. With zero delta, it can be used as a faster substitute for the general matrix product A*B when B=A^T.
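For example, a brief Java sketch (A is an illustrative floating-point data matrix) that forms the Gram matrix A^T*A:

// Java code:

Mat gram = new Mat();
Core.mulTransposed(A, gram, true); // gram = A^T * A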

Parameters:
src - input single-channel matrix. Note that unlike "gemm", the function can multiply not only floating-point matrices.
dst - output square matrix.
aTa - Flag specifying the multiplication ordering. See the description below.
See Also:
org.opencv.core.Core.mulTransposed, calcCovarMatrix(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int, int), repeat(org.opencv.core.Mat, int, int, org.opencv.core.Mat), reduce(org.opencv.core.Mat, org.opencv.core.Mat, int, int, int), gemm(org.opencv.core.Mat, org.opencv.core.Mat, double, org.opencv.core.Mat, double, org.opencv.core.Mat, int)

mulTransposed

public static void mulTransposed(Mat src,
                                 Mat dst,
                                 boolean aTa,
                                 Mat delta,
                                 double scale)

Calculates the product of a matrix and its transposition.

The function mulTransposed calculates the product of src and its transposition:

dst = scale(src - delta)^T(src - delta)

if aTa=true, and

dst = scale(src - delta)(src - delta)^T

otherwise. The function is used to calculate the covariance matrix. With zero delta, it can be used as a faster substitute for the general matrix product A*B when B=A^T.

Parameters:
src - input single-channel matrix. Note that unlike "gemm", the function can multiply not only floating-point matrices.
dst - output square matrix.
aTa - Flag specifying the multiplication ordering. See the description below.
delta - Optional delta matrix subtracted from src before the multiplication. When the matrix is empty (delta=noArray()), it is assumed to be zero, that is, nothing is subtracted. If it has the same size as src, it is simply subtracted. Otherwise, it is "repeated" (see "repeat") to cover the full src and then subtracted. Type of the delta matrix, when it is not empty, must be the same as the type of created output matrix. See the dtype parameter description below.
scale - Optional scale factor for the matrix product.
See Also:
org.opencv.core.Core.mulTransposed, calcCovarMatrix(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int, int), repeat(org.opencv.core.Mat, int, int, org.opencv.core.Mat), reduce(org.opencv.core.Mat, org.opencv.core.Mat, int, int, int), gemm(org.opencv.core.Mat, org.opencv.core.Mat, double, org.opencv.core.Mat, double, org.opencv.core.Mat, int)

mulTransposed

public static void mulTransposed(Mat src,
                                 Mat dst,
                                 boolean aTa,
                                 Mat delta,
                                 double scale,
                                 int dtype)

Calculates the product of a matrix and its transposition.

The function mulTransposed calculates the product of src and its transposition:

dst = scale(src - delta)^T(src - delta)

if aTa=true, and

dst = scale(src - delta)(src - delta)^T

otherwise. The function is used to calculate the covariance matrix. With zero delta, it can be used as a faster substitute for the general matrix product A*B when B=A^T.

Parameters:
src - input single-channel matrix. Note that unlike "gemm", the function can multiply not only floating-point matrices.
dst - output square matrix.
aTa - Flag specifying the multiplication ordering. See the description below.
delta - Optional delta matrix subtracted from src before the multiplication. When the matrix is empty (delta=noArray()), it is assumed to be zero, that is, nothing is subtracted. If it has the same size as src, it is simply subtracted. Otherwise, it is "repeated" (see "repeat") to cover the full src and then subtracted. Type of the delta matrix, when it is not empty, must be the same as the type of created output matrix. See the dtype parameter description below.
scale - Optional scale factor for the matrix product.
dtype - Optional type of the output matrix. When it is negative, the output matrix will have the same type as src. Otherwise, it will be type=CV_MAT_DEPTH(dtype) that should be either CV_32F or CV_64F.
See Also:
org.opencv.core.Core.mulTransposed, calcCovarMatrix(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int, int), repeat(org.opencv.core.Mat, int, int, org.opencv.core.Mat), reduce(org.opencv.core.Mat, org.opencv.core.Mat, int, int, int), gemm(org.opencv.core.Mat, org.opencv.core.Mat, double, org.opencv.core.Mat, double, org.opencv.core.Mat, int)

norm

public static double norm(Mat src1)

Calculates an absolute array norm, an absolute difference norm, or a relative difference norm.

The functions norm calculate an absolute norm of src1 (when there is no src2):

norm = ||src1||_Linf = max_I |src1(I)|           if normType = NORM_INF
norm = ||src1||_L1   = sum_I |src1(I)|           if normType = NORM_L1
norm = ||src1||_L2   = sqrt(sum_I src1(I)^2)     if normType = NORM_L2

or an absolute or relative difference norm if src2 is specified:

norm = ||src1 - src2||_Linf = max_I |src1(I) - src2(I)|            if normType = NORM_INF
norm = ||src1 - src2||_L1   = sum_I |src1(I) - src2(I)|            if normType = NORM_L1
norm = ||src1 - src2||_L2   = sqrt(sum_I (src1(I) - src2(I))^2)    if normType = NORM_L2

or

norm = ||src1 - src2||_Linf / ||src2||_Linf      if normType = NORM_RELATIVE_INF
norm = ||src1 - src2||_L1 / ||src2||_L1          if normType = NORM_RELATIVE_L1
norm = ||src1 - src2||_L2 / ||src2||_L2          if normType = NORM_RELATIVE_L2

The functions norm return the calculated norm.

When the mask parameter is specified and it is not empty, the norm is calculated only over the region specified by the mask.

Multi-channel input arrays are treated as single-channel arrays; that is, the results for all channels are combined.
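A hedged Java sketch (img1 and img2 are illustrative matrices of the same size and type):

// Java code:

double absErr = Core.norm(img1, img2, Core.NORM_L2); // absolute L2 difference
double relErr = absErr / Core.norm(img2, Core.NORM_L2); // relative L2 difference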

Parameters:
src1 - first input array.
See Also:
org.opencv.core.Core.norm

norm

public static double norm(Mat src1,
                          int normType)

Calculates an absolute array norm, an absolute difference norm, or a relative difference norm.

The functions norm calculate an absolute norm of src1 (when there is no src2):

norm = ||src1||_Linf = max_I |src1(I)|           if normType = NORM_INF
norm = ||src1||_L1   = sum_I |src1(I)|           if normType = NORM_L1
norm = ||src1||_L2   = sqrt(sum_I src1(I)^2)     if normType = NORM_L2

or an absolute or relative difference norm if src2 is specified:

norm = ||src1 - src2||_Linf = max_I |src1(I) - src2(I)|            if normType = NORM_INF
norm = ||src1 - src2||_L1   = sum_I |src1(I) - src2(I)|            if normType = NORM_L1
norm = ||src1 - src2||_L2   = sqrt(sum_I (src1(I) - src2(I))^2)    if normType = NORM_L2

or

norm = ||src1 - src2||_Linf / ||src2||_Linf      if normType = NORM_RELATIVE_INF
norm = ||src1 - src2||_L1 / ||src2||_L1          if normType = NORM_RELATIVE_L1
norm = ||src1 - src2||_L2 / ||src2||_L2          if normType = NORM_RELATIVE_L2

The functions norm return the calculated norm.

When the mask parameter is specified and it is not empty, the norm is calculated only over the region specified by the mask.

Multi-channel input arrays are treated as single-channel arrays; that is, the results for all channels are combined.

Parameters:
src1 - first input array.
normType - type of the norm (see the details below).
See Also:
org.opencv.core.Core.norm

norm

public static double norm(Mat src1,
                          int normType,
                          Mat mask)

Calculates an absolute array norm, an absolute difference norm, or a relative difference norm.

The functions norm calculate an absolute norm of src1 (when there is no src2):

norm = ||src1||_Linf = max_I |src1(I)|           if normType = NORM_INF
norm = ||src1||_L1   = sum_I |src1(I)|           if normType = NORM_L1
norm = ||src1||_L2   = sqrt(sum_I src1(I)^2)     if normType = NORM_L2

or an absolute or relative difference norm if src2 is specified:

norm = ||src1 - src2||_Linf = max_I |src1(I) - src2(I)|            if normType = NORM_INF
norm = ||src1 - src2||_L1   = sum_I |src1(I) - src2(I)|            if normType = NORM_L1
norm = ||src1 - src2||_L2   = sqrt(sum_I (src1(I) - src2(I))^2)    if normType = NORM_L2

or

norm = ||src1 - src2||_Linf / ||src2||_Linf      if normType = NORM_RELATIVE_INF
norm = ||src1 - src2||_L1 / ||src2||_L1          if normType = NORM_RELATIVE_L1
norm = ||src1 - src2||_L2 / ||src2||_L2          if normType = NORM_RELATIVE_L2

The functions norm return the calculated norm.

When the mask parameter is specified and it is not empty, the norm is calculated only over the region specified by the mask.

Multi-channel input arrays are treated as single-channel arrays; that is, the results for all channels are combined.

Parameters:
src1 - first input array.
normType - type of the norm (see the details below).
mask - optional operation mask; it must have the same size as src1 and CV_8UC1 type.
See Also:
org.opencv.core.Core.norm

norm

public static double norm(Mat src1,
                          Mat src2)

Calculates an absolute array norm, an absolute difference norm, or a relative difference norm.

The functions norm calculate an absolute norm of src1 (when there is no src2):

norm = ||src1||_Linf = max_I |src1(I)|           if normType = NORM_INF
norm = ||src1||_L1   = sum_I |src1(I)|           if normType = NORM_L1
norm = ||src1||_L2   = sqrt(sum_I src1(I)^2)     if normType = NORM_L2

or an absolute or relative difference norm if src2 is specified:

norm = ||src1 - src2||_Linf = max_I |src1(I) - src2(I)|            if normType = NORM_INF
norm = ||src1 - src2||_L1   = sum_I |src1(I) - src2(I)|            if normType = NORM_L1
norm = ||src1 - src2||_L2   = sqrt(sum_I (src1(I) - src2(I))^2)    if normType = NORM_L2

or

norm = ||src1 - src2||_Linf / ||src2||_Linf      if normType = NORM_RELATIVE_INF
norm = ||src1 - src2||_L1 / ||src2||_L1          if normType = NORM_RELATIVE_L1
norm = ||src1 - src2||_L2 / ||src2||_L2          if normType = NORM_RELATIVE_L2

The functions norm return the calculated norm.

When the mask parameter is specified and it is not empty, the norm is calculated only over the region specified by the mask.

Multi-channel input arrays are treated as single-channel arrays; that is, the results for all channels are combined.

Parameters:
src1 - first input array.
src2 - second input array of the same size and the same type as src1.
See Also:
org.opencv.core.Core.norm

norm

public static double norm(Mat src1,
                          Mat src2,
                          int normType)

Calculates an absolute array norm, an absolute difference norm, or a relative difference norm.

The functions norm calculate an absolute norm of src1 (when there is no src2):

norm = ||src1||_Linf = max_I |src1(I)|           if normType = NORM_INF
norm = ||src1||_L1   = sum_I |src1(I)|           if normType = NORM_L1
norm = ||src1||_L2   = sqrt(sum_I src1(I)^2)     if normType = NORM_L2

or an absolute or relative difference norm if src2 is specified:

norm = ||src1 - src2||_Linf = max_I |src1(I) - src2(I)|            if normType = NORM_INF
norm = ||src1 - src2||_L1   = sum_I |src1(I) - src2(I)|            if normType = NORM_L1
norm = ||src1 - src2||_L2   = sqrt(sum_I (src1(I) - src2(I))^2)    if normType = NORM_L2

or

norm = ||src1 - src2||_Linf / ||src2||_Linf      if normType = NORM_RELATIVE_INF
norm = ||src1 - src2||_L1 / ||src2||_L1          if normType = NORM_RELATIVE_L1
norm = ||src1 - src2||_L2 / ||src2||_L2          if normType = NORM_RELATIVE_L2

The functions norm return the calculated norm.

When the mask parameter is specified and it is not empty, the norm is calculated only over the region specified by the mask.

Multi-channel input arrays are treated as single-channel arrays; that is, the results for all channels are combined.

Parameters:
src1 - first input array.
src2 - second input array of the same size and the same type as src1.
normType - type of the norm (see the details below).
See Also:
org.opencv.core.Core.norm

norm

public static double norm(Mat src1,
                          Mat src2,
                          int normType,
                          Mat mask)

Calculates an absolute array norm, an absolute difference norm, or a relative difference norm.

The functions norm calculate an absolute norm of src1 (when there is no src2):

norm = ||src1||_Linf = max_I |src1(I)|           if normType = NORM_INF
norm = ||src1||_L1   = sum_I |src1(I)|           if normType = NORM_L1
norm = ||src1||_L2   = sqrt(sum_I src1(I)^2)     if normType = NORM_L2

or an absolute or relative difference norm if src2 is specified:

norm = ||src1 - src2||_Linf = max_I |src1(I) - src2(I)|            if normType = NORM_INF
norm = ||src1 - src2||_L1   = sum_I |src1(I) - src2(I)|            if normType = NORM_L1
norm = ||src1 - src2||_L2   = sqrt(sum_I (src1(I) - src2(I))^2)    if normType = NORM_L2

or

norm = ||src1 - src2||_Linf / ||src2||_Linf      if normType = NORM_RELATIVE_INF
norm = ||src1 - src2||_L1 / ||src2||_L1          if normType = NORM_RELATIVE_L1
norm = ||src1 - src2||_L2 / ||src2||_L2          if normType = NORM_RELATIVE_L2

The functions norm return the calculated norm.

When the mask parameter is specified and it is not empty, the norm is calculated only over the region specified by the mask.

Multi-channel input arrays are treated as single-channel arrays; that is, the results for all channels are combined.

Parameters:
src1 - first input array.
src2 - second input array of the same size and the same type as src1.
normType - type of the norm (see the details below).
mask - optional operation mask; it must have the same size as src1 and CV_8UC1 type.
See Also:
org.opencv.core.Core.norm

normalize

public static void normalize(Mat src,
                             Mat dst)

Normalizes the norm or value range of an array.

The functions normalize scale and shift the input array elements so that

||dst||_Lp = alpha

(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, or NORM_L2, respectively; or so that

min_I dst(I) = alpha,  max_I dst(I) = beta

when normType=NORM_MINMAX (for dense arrays only). The optional mask specifies a sub-array to be normalized. This means that the norm or min-n-max are calculated over the sub-array, and then this sub-array is modified to be normalized. If you want to only use the mask to calculate the norm or min-max but modify the whole array, you can use "norm" and "Mat.convertTo".

In case of sparse matrices, only the non-zero values are analyzed and transformed. Because of this, the range transformation for sparse matrices is not allowed since it can shift the zero level.
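A common Java usage sketch (gray is an illustrative single-channel image) that stretches the values to the full 0..255 range for display:

// Java code:

Mat vis = new Mat();
Core.normalize(gray, vis, 0, 255, Core.NORM_MINMAX, CvType.CV_8U);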

Parameters:
src - input array.
dst - output array of the same size as src.
See Also:
org.opencv.core.Core.normalize, Mat.convertTo(org.opencv.core.Mat, int, double, double), norm(org.opencv.core.Mat, int, org.opencv.core.Mat)

normalize

public static void normalize(Mat src,
                             Mat dst,
                             double alpha,
                             double beta,
                             int norm_type)

Normalizes the norm or value range of an array.

The functions normalize scale and shift the input array elements so that

||dst||_Lp = alpha

(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, or NORM_L2, respectively; or so that

min_I dst(I) = alpha,  max_I dst(I) = beta

when normType=NORM_MINMAX (for dense arrays only). The optional mask specifies a sub-array to be normalized. This means that the norm or min-n-max are calculated over the sub-array, and then this sub-array is modified to be normalized. If you want to only use the mask to calculate the norm or min-max but modify the whole array, you can use "norm" and "Mat.convertTo".

In case of sparse matrices, only the non-zero values are analyzed and transformed. Because of this, the range transformation for sparse matrices is not allowed since it can shift the zero level.

Parameters:
src - input array.
dst - output array of the same size as src.
alpha - norm value to normalize to or the lower range boundary in case of the range normalization.
beta - upper range boundary in case of the range normalization; it is not used for the norm normalization.
norm_type - normalization type (one of NORM_INF, NORM_L1, NORM_L2, or NORM_MINMAX).
See Also:
org.opencv.core.Core.normalize, Mat.convertTo(org.opencv.core.Mat, int, double, double), norm(org.opencv.core.Mat, int, org.opencv.core.Mat)

normalize

public static void normalize(Mat src,
                             Mat dst,
                             double alpha,
                             double beta,
                             int norm_type,
                             int dtype)

Normalizes the norm or value range of an array.

The functions normalize scale and shift the input array elements so that

||dst||_Lp = alpha

(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, or NORM_L2, respectively; or so that

min_I dst(I) = alpha,  max_I dst(I) = beta

when normType=NORM_MINMAX (for dense arrays only). The optional mask specifies a sub-array to be normalized. This means that the norm or min-n-max are calculated over the sub-array, and then this sub-array is modified to be normalized. If you want to only use the mask to calculate the norm or min-max but modify the whole array, you can use "norm" and "Mat.convertTo".

In case of sparse matrices, only the non-zero values are analyzed and transformed. Because of this, the range transformation for sparse matrices is not allowed since it can shift the zero level.

Parameters:
src - input array.
dst - output array of the same size as src.
alpha - norm value to normalize to or the lower range boundary in case of the range normalization.
beta - upper range boundary in case of the range normalization; it is not used for the norm normalization.
norm_type - normalization type (one of NORM_INF, NORM_L1, NORM_L2, or NORM_MINMAX).
dtype - when negative, the output array has the same type as src; otherwise, it has the same number of channels as src and the depth =CV_MAT_DEPTH(dtype).
See Also:
org.opencv.core.Core.normalize, Mat.convertTo(org.opencv.core.Mat, int, double, double), norm(org.opencv.core.Mat, int, org.opencv.core.Mat)

normalize

public static void normalize(Mat src,
                             Mat dst,
                             double alpha,
                             double beta,
                             int norm_type,
                             int dtype,
                             Mat mask)

Normalizes the norm or value range of an array.

The functions normalize scale and shift the input array elements so that

||dst||_Lp = alpha

(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, or NORM_L2, respectively; or so that

min_I dst(I) = alpha,  max_I dst(I) = beta

when normType=NORM_MINMAX (for dense arrays only). The optional mask specifies a sub-array to be normalized. This means that the norm or min-n-max are calculated over the sub-array, and then this sub-array is modified to be normalized. If you want to only use the mask to calculate the norm or min-max but modify the whole array, you can use "norm" and "Mat.convertTo".

In case of sparse matrices, only the non-zero values are analyzed and transformed. Because of this, the range transformation for sparse matrices is not allowed since it can shift the zero level.

Parameters:
src - input array.
dst - output array of the same size as src.
alpha - norm value to normalize to or the lower range boundary in case of the range normalization.
beta - upper range boundary in case of the range normalization; it is not used for the norm normalization.
norm_type - normalization type (one of NORM_INF, NORM_L1, NORM_L2, or NORM_MINMAX).
dtype - when negative, the output array has the same type as src; otherwise, it has the same number of channels as src and the depth =CV_MAT_DEPTH(dtype).
mask - optional operation mask.
See Also:
org.opencv.core.Core.normalize, Mat.convertTo(org.opencv.core.Mat, int, double, double), norm(org.opencv.core.Mat, int, org.opencv.core.Mat)

patchNaNs

public static void patchNaNs(Mat a)

patchNaNs

public static void patchNaNs(Mat a,
                             double val)

PCABackProject

public static void PCABackProject(Mat data,
                                  Mat mean,
                                  Mat eigenvectors,
                                  Mat result)

PCACompute

public static void PCACompute(Mat data,
                              Mat mean,
                              Mat eigenvectors)

PCACompute

public static void PCACompute(Mat data,
                              Mat mean,
                              Mat eigenvectors,
                              int maxComponents)

PCAComputeVar

public static void PCAComputeVar(Mat data,
                                 Mat mean,
                                 Mat eigenvectors,
                                 double retainedVariance)

PCAProject

public static void PCAProject(Mat data,
                              Mat mean,
                              Mat eigenvectors,
                              Mat result)

perspectiveTransform

public static void perspectiveTransform(Mat src,
                                        Mat dst,
                                        Mat m)

Performs the perspective matrix transformation of vectors.

The function perspectiveTransform transforms every element of src by treating it as a 2D or 3D vector, in the following way:

(x, y, z) -> (x'/w, y'/w, z'/w)

where

(x', y', z', w') = mat * [x y z 1]^T

and

w = w' if w' != 0; infinity otherwise

Here a 3D vector transformation is shown. In case of a 2D vector transformation, the z component is omitted.

Note: The function transforms a sparse set of 2D or 3D vectors. If you want to transform an image using perspective transformation, use "warpPerspective". If you have an inverse problem, that is, you want to compute the most probable perspective transformation out of several pairs of corresponding points, you can use "getPerspectiveTransform" or "findHomography".
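A brief Java sketch (H is an illustrative 3x3 floating-point homography) that maps a sparse set of 2D points:

// Java code:

MatOfPoint2f srcPts = new MatOfPoint2f(new Point(0, 0), new Point(10, 5));
MatOfPoint2f dstPts = new MatOfPoint2f();
Core.perspectiveTransform(srcPts, dstPts, H);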

Parameters:
src - input two-channel or three-channel floating-point array; each element is a 2D/3D vector to be transformed.
dst - output array of the same size and type as src.
m - 3x3 or 4x4 floating-point transformation matrix.
See Also:
org.opencv.core.Core.perspectiveTransform, Calib3d.findHomography(org.opencv.core.MatOfPoint2f, org.opencv.core.MatOfPoint2f, int, double, org.opencv.core.Mat), Imgproc.warpPerspective(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Size, int, int, org.opencv.core.Scalar), transform(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), Imgproc.getPerspectiveTransform(org.opencv.core.Mat, org.opencv.core.Mat)

phase

public static void phase(Mat x,
                         Mat y,
                         Mat angle)

Calculates the rotation angle of 2D vectors.

The function phase calculates the rotation angle of each 2D vector that is formed from the corresponding elements of x and y :

angle(I) = atan2(y(I), x(I))

The angle estimation accuracy is about 0.3 degrees. When x(I)=y(I)=0, the corresponding angle(I) is set to 0.
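For example, in Java (gradX and gradY are illustrative floating-point gradient images):

// Java code:

Mat angle = new Mat();
Core.phase(gradX, gradY, angle); // per-element atan2(y, x), in radians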

Parameters:
x - input floating-point array of x-coordinates of 2D vectors.
y - input array of y-coordinates of 2D vectors; it must have the same size and the same type as x.
angle - output array of vector angles; it has the same size and same type as x.
See Also:
org.opencv.core.Core.phase

phase

public static void phase(Mat x,
                         Mat y,
                         Mat angle,
                         boolean angleInDegrees)

Calculates the rotation angle of 2D vectors.

The function phase calculates the rotation angle of each 2D vector that is formed from the corresponding elements of x and y :

angle(I) = atan2(y(I), x(I))

The angle estimation accuracy is about 0.3 degrees. When x(I)=y(I)=0, the corresponding angle(I) is set to 0.

Parameters:
x - input floating-point array of x-coordinates of 2D vectors.
y - input array of y-coordinates of 2D vectors; it must have the same size and the same type as x.
angle - output array of vector angles; it has the same size and same type as x.
angleInDegrees - when true, the function calculates the angle in degrees, otherwise, they are measured in radians.
See Also:
org.opencv.core.Core.phase

polarToCart

public static void polarToCart(Mat magnitude,
                               Mat angle,
                               Mat x,
                               Mat y)

Calculates x and y coordinates of 2D vectors from their magnitude and angle.

The function polarToCart calculates the Cartesian coordinates of each 2D vector represented by the corresponding elements of magnitude and angle :

x(I) = magnitude(I) cos(angle(I)) y(I) = magnitude(I) sin(angle(I))

The relative accuracy of the estimated coordinates is about 1e-6.

Parameters:
magnitude - input floating-point array of magnitudes of 2D vectors; it can be an empty matrix (=Mat()), in this case, the function assumes that all the magnitudes are =1; if it is not empty, it must have the same size and type as angle.
angle - input floating-point array of angles of 2D vectors.
x - output array of x-coordinates of 2D vectors; it has the same size and type as angle.
y - output array of y-coordinates of 2D vectors; it has the same size and type as angle.
See Also:
org.opencv.core.Core.polarToCart, log(org.opencv.core.Mat, org.opencv.core.Mat), cartToPolar(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, boolean), pow(org.opencv.core.Mat, double, org.opencv.core.Mat), sqrt(org.opencv.core.Mat, org.opencv.core.Mat), magnitude(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), exp(org.opencv.core.Mat, org.opencv.core.Mat), phase(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, boolean)

polarToCart

public static void polarToCart(Mat magnitude,
                               Mat angle,
                               Mat x,
                               Mat y,
                               boolean angleInDegrees)

Calculates x and y coordinates of 2D vectors from their magnitude and angle.

The function polarToCart calculates the Cartesian coordinates of each 2D vector represented by the corresponding elements of magnitude and angle :

x(I) = magnitude(I) cos(angle(I)) y(I) = magnitude(I) sin(angle(I))

The relative accuracy of the estimated coordinates is about 1e-6.

Parameters:
magnitude - input floating-point array of magnitudes of 2D vectors; it can be an empty matrix (=Mat()), in this case, the function assumes that all the magnitudes are =1; if it is not empty, it must have the same size and type as angle.
angle - input floating-point array of angles of 2D vectors.
x - output array of x-coordinates of 2D vectors; it has the same size and type as angle.
y - output array of y-coordinates of 2D vectors; it has the same size and type as angle.
angleInDegrees - when true, the input angles are measured in degrees, otherwise, they are measured in radians.
See Also:
org.opencv.core.Core.polarToCart, log(org.opencv.core.Mat, org.opencv.core.Mat), cartToPolar(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, boolean), pow(org.opencv.core.Mat, double, org.opencv.core.Mat), sqrt(org.opencv.core.Mat, org.opencv.core.Mat), magnitude(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), exp(org.opencv.core.Mat, org.opencv.core.Mat), phase(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, boolean)

polylines

public static void polylines(Mat img,
                             java.util.List<MatOfPoint> pts,
                             boolean isClosed,
                             Scalar color)

Draws several polygonal curves.

The function polylines draws one or more polygonal curves.
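A short Java sketch (img is an illustrative 8-bit color image; assumes java.util.Arrays is imported):

// Java code:

MatOfPoint triangle = new MatOfPoint(new Point(10, 10), new Point(60, 10), new Point(35, 50));
Core.polylines(img, Arrays.asList(triangle), true, new Scalar(0, 255, 0), 2);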

Parameters:
img - Image.
pts - Array of polygonal curves.
isClosed - Flag indicating whether the drawn polylines are closed or not. If they are closed, the function draws a line from the last vertex of each curve to its first vertex.
color - Polyline color.
See Also:
org.opencv.core.Core.polylines

polylines

public static void polylines(Mat img,
                             java.util.List<MatOfPoint> pts,
                             boolean isClosed,
                             Scalar color,
                             int thickness)

Draws several polygonal curves.

The function polylines draws one or more polygonal curves.

Parameters:
img - Image.
pts - Array of polygonal curves.
isClosed - Flag indicating whether the drawn polylines are closed or not. If they are closed, the function draws a line from the last vertex of each curve to its first vertex.
color - Polyline color.
thickness - Thickness of the polyline edges.
See Also:
org.opencv.core.Core.polylines

polylines

public static void polylines(Mat img,
                             java.util.List<MatOfPoint> pts,
                             boolean isClosed,
                             Scalar color,
                             int thickness,
                             int lineType,
                             int shift)

Draws several polygonal curves.

The function polylines draws one or more polygonal curves.

Parameters:
img - Image.
pts - Array of polygonal curves.
isClosed - Flag indicating whether the drawn polylines are closed or not. If they are closed, the function draws a line from the last vertex of each curve to its first vertex.
color - Polyline color.
thickness - Thickness of the polyline edges.
lineType - Type of the line segments. See the "line" description.
shift - Number of fractional bits in the vertex coordinates.
See Also:
org.opencv.core.Core.polylines

pow

public static void pow(Mat src,
                       double power,
                       Mat dst)

Raises every array element to a power.

The function pow raises every element of the input array to power :

dst(I) = src(I)^power      if power is an integer
dst(I) = |src(I)|^power    otherwise

So, for a non-integer power exponent, the absolute values of input array elements are used. However, it is possible to get true values for negative values using some extra operations. In the example below, computing the 5th root of array src shows:

// C++ code:

Mat mask = src < 0;
pow(src, 1./5, dst);
subtract(Scalar::all(0), dst, dst, mask);
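A rough Java equivalent of the C++ snippet above (src and dst are illustrative floating-point matrices):

// Java code:

Mat mask = new Mat();
Core.compare(src, new Scalar(0), mask, Core.CMP_LT); // mask of negative elements
Core.pow(src, 1.0 / 5, dst); // |src|^(1/5)
Core.subtract(Mat.zeros(src.size(), src.type()), dst, dst, mask); // negate where src < 0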

For some values of power, such as integer values, 0.5 and -0.5, specialized faster algorithms are used.

Special values (NaN, Inf) are not handled.

Parameters:
src - input array.
power - exponent of power.
dst - output array of the same size and type as src.
See Also:
org.opencv.core.Core.pow, cartToPolar(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, boolean), polarToCart(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, boolean), exp(org.opencv.core.Mat, org.opencv.core.Mat), sqrt(org.opencv.core.Mat, org.opencv.core.Mat), log(org.opencv.core.Mat, org.opencv.core.Mat)

putText

public static void putText(Mat img,
                           java.lang.String text,
                           Point org,
                           int fontFace,
                           double fontScale,
                           Scalar color)

Draws a text string.

The function putText renders the specified text string in the image. Symbols that cannot be rendered using the specified font are replaced by question marks. See "getTextSize" for a text rendering code example.
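For example, in Java (img is an illustrative image):

// Java code:

Core.putText(img, "hello", new Point(20, 40), Core.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar(255, 255, 255), 2);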

Parameters:
img - Image.
text - Text string to be drawn.
org - Bottom-left corner of the text string in the image.
fontFace - Font type. One of FONT_HERSHEY_SIMPLEX, FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX, FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, FONT_HERSHEY_COMPLEX_SMALL, FONT_HERSHEY_SCRIPT_SIMPLEX, or FONT_HERSHEY_SCRIPT_COMPLEX, where each of the font IDs can be combined with FONT_ITALIC to get slanted letters.
fontScale - Font scale factor that is multiplied by the font-specific base size.
color - Text color.
See Also:
org.opencv.core.Core.putText

putText

public static void putText(Mat img,
                           java.lang.String text,
                           Point org,
                           int fontFace,
                           double fontScale,
                           Scalar color,
                           int thickness)

Draws a text string.

The function putText renders the specified text string in the image. Symbols that cannot be rendered using the specified font are replaced by question marks. See "getTextSize" for a text rendering code example.

Parameters:
img - Image.
text - Text string to be drawn.
org - Bottom-left corner of the text string in the image.
fontFace - Font type. One of FONT_HERSHEY_SIMPLEX, FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX, FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, FONT_HERSHEY_COMPLEX_SMALL, FONT_HERSHEY_SCRIPT_SIMPLEX, or FONT_HERSHEY_SCRIPT_COMPLEX, where each of the font IDs can be combined with FONT_ITALIC to get slanted letters.
fontScale - Font scale factor that is multiplied by the font-specific base size.
color - Text color.
thickness - Thickness of the lines used to draw a text.
See Also:
org.opencv.core.Core.putText

putText

public static void putText(Mat img,
                           java.lang.String text,
                           Point org,
                           int fontFace,
                           double fontScale,
                           Scalar color,
                           int thickness,
                           int lineType,
                           boolean bottomLeftOrigin)

Draws a text string.

The function putText renders the specified text string in the image. Symbols that cannot be rendered using the specified font are replaced by question marks. See "getTextSize" for a text rendering code example.

Parameters:
img - Image.
text - Text string to be drawn.
org - Bottom-left corner of the text string in the image.
fontFace - Font type. One of FONT_HERSHEY_SIMPLEX, FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX, FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, FONT_HERSHEY_COMPLEX_SMALL, FONT_HERSHEY_SCRIPT_SIMPLEX, or FONT_HERSHEY_SCRIPT_COMPLEX, where each of the font IDs can be combined with FONT_ITALIC to get slanted letters.
fontScale - Font scale factor that is multiplied by the font-specific base size.
color - Text color.
thickness - Thickness of the lines used to draw a text.
lineType - Line type. See the "line" description.
bottomLeftOrigin - When true, the image data origin is at the bottom-left corner. Otherwise, it is at the top-left corner.
See Also:
org.opencv.core.Core.putText

randn

public static void randn(Mat dst,
                         double mean,
                         double stddev)

Fills the array with normally distributed random numbers.

The function randn fills the matrix dst with normally distributed random numbers with the specified mean vector and the standard deviation matrix. The generated random numbers are clipped to fit the value range of the output array data type.
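A brief Java sketch filling a pre-allocated matrix with zero-mean, unit-variance Gaussian noise:

// Java code:

Mat noise = new Mat(256, 256, CvType.CV_32FC1);
Core.randn(noise, 0, 1);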

Parameters:
dst - output array of random numbers; the array must be pre-allocated and have 1 to 4 channels.
mean - mean value (expectation) of the generated random numbers.
stddev - standard deviation of the generated random numbers; it can be either a vector (in which case a diagonal standard deviation matrix is assumed) or a square matrix.
See Also:
org.opencv.core.Core.randn, randu(org.opencv.core.Mat, double, double)

randShuffle

public static void randShuffle(Mat dst)

randShuffle

public static void randShuffle(Mat dst,
                               double iterFactor)

randu

public static void randu(Mat dst,
                         double low,
                         double high)

Generates a single uniformly-distributed random number or an array of random numbers.

The template functions randu generate and return the next uniformly-distributed random value of the specified type. randu() is equivalent to (int)theRNG(); and so on. See the "RNG" description.

The second non-template variant of the function fills the matrix dst with uniformly-distributed random numbers from the specified range:

low_c <= dst(I)_c < high_c
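For example, in Java:

// Java code:

Mat m = new Mat(4, 4, CvType.CV_8UC1);
Core.randu(m, 0, 256); // uniform values in [0, 256)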

Parameters:
dst - output array of random numbers; the array must be pre-allocated.
low - inclusive lower boundary of the generated random numbers.
high - exclusive upper boundary of the generated random numbers.
See Also:
org.opencv.core.Core.randu, randn(org.opencv.core.Mat, double, double)

rectangle

public static void rectangle(Mat img,
                             Point pt1,
                             Point pt2,
                             Scalar color)

Draws a simple, thick, or filled up-right rectangle.

The function rectangle draws a rectangle outline or a filled rectangle whose two opposite corners are pt1 and pt2, or r.tl() and r.br()-Point(1,1).
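For example, in Java (img is an illustrative image):

// Java code:

Core.rectangle(img, new Point(10, 10), new Point(100, 60), new Scalar(0, 0, 255), 2);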

Parameters:
img - Image.
pt1 - Vertex of the rectangle.
pt2 - Vertex of the rectangle opposite to pt1.
color - Rectangle color or brightness (grayscale image).
See Also:
org.opencv.core.Core.rectangle

rectangle

public static void rectangle(Mat img,
                             Point pt1,
                             Point pt2,
                             Scalar color,
                             int thickness)

Draws a simple, thick, or filled up-right rectangle.

The function rectangle draws a rectangle outline or a filled rectangle whose two opposite corners are pt1 and pt2, or r.tl() and r.br()-Point(1,1).

Parameters:
img - Image.
pt1 - Vertex of the rectangle.
pt2 - Vertex of the rectangle opposite to pt1.
color - Rectangle color or brightness (grayscale image).
thickness - Thickness of lines that make up the rectangle. Negative values, like CV_FILLED, mean that the function has to draw a filled rectangle.
See Also:
org.opencv.core.Core.rectangle

rectangle

public static void rectangle(Mat img,
                             Point pt1,
                             Point pt2,
                             Scalar color,
                             int thickness,
                             int lineType,
                             int shift)

Draws a simple, thick, or filled up-right rectangle.

The function rectangle draws a rectangle outline or a filled rectangle whose two opposite corners are pt1 and pt2, or r.tl() and r.br()-Point(1,1).

Parameters:
img - Image.
pt1 - Vertex of the rectangle.
pt2 - Vertex of the rectangle opposite to pt1.
color - Rectangle color or brightness (grayscale image).
thickness - Thickness of lines that make up the rectangle. Negative values, like CV_FILLED, mean that the function has to draw a filled rectangle.
lineType - Type of the line. See the "line" description.
shift - Number of fractional bits in the point coordinates.
See Also:
org.opencv.core.Core.rectangle

reduce

public static void reduce(Mat src,
                          Mat dst,
                          int dim,
                          int rtype)

Reduces a matrix to a vector.

The function reduce reduces the matrix to a vector by treating the matrix rows/columns as a set of 1D vectors and performing the specified operation on the vectors until a single row/column is obtained. For example, the function can be used to compute horizontal and vertical projections of a raster image. In case of CV_REDUCE_SUM and CV_REDUCE_AVG, the output may have a larger element bit-depth to preserve accuracy. And multi-channel arrays are also supported in these two reduction modes.
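For example, a Java sketch (img is an illustrative 8-bit image) computing per-column sums into a single 32-bit row:

// Java code:

Mat colSums = new Mat();
Core.reduce(img, colSums, 0, Core.REDUCE_SUM, CvType.CV_32S); // result is 1 x img.cols()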

Parameters:
src - input 2D matrix.
dst - output vector. Its size and type are defined by the dim and dtype parameters.
dim - dimension index along which the matrix is reduced. 0 means that the matrix is reduced to a single row. 1 means that the matrix is reduced to a single column.
rtype - reduction operation that could be one of the following:
  • CV_REDUCE_SUM: the output is the sum of all rows/columns of the matrix.
  • CV_REDUCE_AVG: the output is the mean vector of all rows/columns of the matrix.
  • CV_REDUCE_MAX: the output is the maximum (column/row-wise) of all rows/columns of the matrix.
  • CV_REDUCE_MIN: the output is the minimum (column/row-wise) of all rows/columns of the matrix.
See Also:
org.opencv.core.Core.reduce, repeat(org.opencv.core.Mat, int, int, org.opencv.core.Mat)

reduce

public static void reduce(Mat src,
                          Mat dst,
                          int dim,
                          int rtype,
                          int dtype)

Reduces a matrix to a vector.

The function reduce reduces the matrix to a vector by treating the matrix rows/columns as a set of 1D vectors and performing the specified operation on the vectors until a single row/column is obtained. For example, the function can be used to compute horizontal and vertical projections of a raster image. In case of CV_REDUCE_SUM and CV_REDUCE_AVG, the output may have a larger element bit-depth to preserve accuracy. And multi-channel arrays are also supported in these two reduction modes.

Parameters:
src - input 2D matrix.
dst - output vector. Its size and type are defined by the dim and dtype parameters.
dim - dimension index along which the matrix is reduced. 0 means that the matrix is reduced to a single row. 1 means that the matrix is reduced to a single column.
rtype - reduction operation that could be one of the following:
  • CV_REDUCE_SUM: the output is the sum of all rows/columns of the matrix.
  • CV_REDUCE_AVG: the output is the mean vector of all rows/columns of the matrix.
  • CV_REDUCE_MAX: the output is the maximum (column/row-wise) of all rows/columns of the matrix.
  • CV_REDUCE_MIN: the output is the minimum (column/row-wise) of all rows/columns of the matrix.
dtype - when negative, the output vector will have the same type as the input matrix, otherwise, its type will be CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), src.channels()).
See Also:
org.opencv.core.Core.reduce, repeat(org.opencv.core.Mat, int, int, org.opencv.core.Mat)

repeat

public static void repeat(Mat src,
                          int ny,
                          int nx,
                          Mat dst)

Fills the output array with repeated copies of the input array.

The functions "repeat" duplicate the input array one or more times along each of the two axes:

dst(i,j) = src(i mod src.rows, j mod src.cols)

The second variant of the function is more convenient to use with "MatrixExpressions".
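
For illustration, a small Java sketch (assuming the native library is loaded) that tiles a 1x2 array into a 3x4 array:

// Java code (illustrative sketch):

Mat tile = new Mat(1, 2, CvType.CV_8U);
tile.put(0, 0, 1, 2);
Mat tiled = new Mat();
// ny = 3 copies vertically, nx = 2 copies horizontally
Core.repeat(tile, 3, 2, tiled);
// tiled = [[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2]]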

Parameters:
src - input array to replicate.
ny - Flag to specify how many times the src is repeated along the vertical axis.
nx - Flag to specify how many times the src is repeated along the horizontal axis.
dst - output array of the same type as src.
See Also:
org.opencv.core.Core.repeat, reduce(org.opencv.core.Mat, org.opencv.core.Mat, int, int, int)

scaleAdd

public static void scaleAdd(Mat src1,
                            double alpha,
                            Mat src2,
                            Mat dst)

Calculates the sum of a scaled array and another array.

The function scaleAdd is one of the classical primitive linear algebra operations, known as DAXPY or SAXPY in BLAS (http://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms). It calculates the sum of a scaled array and another array:

dst(I) = scale*src1(I) + src2(I)

The function can also be emulated with a matrix expression, for example:

// C++ code:

Mat A(3, 3, CV_64F);
...
A.row(0) = A.row(1)*2 + A.row(2);
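
Through the Java binding, the same operation might look as follows (a minimal sketch; the matrices and the factor 3.0 are illustrative only):

// Java code (illustrative sketch):

Mat a = Mat.ones(2, 2, CvType.CV_64F);
Mat b = Mat.ones(2, 2, CvType.CV_64F);
Mat dst = new Mat();
// dst(I) = 3.0*a(I) + b(I), so every element becomes 4.0
Core.scaleAdd(a, 3.0, b, dst);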

Parameters:
src1 - first input array.
alpha - scale factor for the first array.
src2 - second input array of the same size and type as src1.
dst - output array of the same size and type as src1.
See Also:
org.opencv.core.Core.scaleAdd, Mat.dot(org.opencv.core.Mat), Mat.convertTo(org.opencv.core.Mat, int, double, double), addWeighted(org.opencv.core.Mat, double, org.opencv.core.Mat, double, double, org.opencv.core.Mat, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), subtract(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int)

setErrorVerbosity

public static void setErrorVerbosity(boolean verbose)

setIdentity

public static void setIdentity(Mat mtx)

Initializes a scaled identity matrix.

The function "setIdentity" initializes a scaled identity matrix:

mtx(i,j) = value if i=j; 0 otherwise

The function can also be emulated using the matrix initializers and the matrix expressions:

// C++ code:

Mat A = Mat::eye(4, 3, CV_32F)*5;
// A will be set to [[5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0]]
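
A short Java sketch of both overloads (assuming the native library is loaded):

// Java code (illustrative sketch):

Mat mtx = new Mat(3, 3, CvType.CV_32F);
Core.setIdentity(mtx);                  // diagonal set to 1, everything else to 0
Core.setIdentity(mtx, new Scalar(5));   // diagonal set to 5 instead
// mtx = [[5, 0, 0], [0, 5, 0], [0, 0, 5]]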

Parameters:
mtx - matrix to initialize (not necessarily square).
See Also:
org.opencv.core.Core.setIdentity, Mat.setTo(org.opencv.core.Scalar), Mat.ones(int, int, int), Mat.zeros(int, int, int)

setIdentity

public static void setIdentity(Mat mtx,
                               Scalar s)

Initializes a scaled identity matrix.

The function "setIdentity" initializes a scaled identity matrix:

mtx(i,j) = value if i=j; 0 otherwise

The function can also be emulated using the matrix initializers and the matrix expressions:

// C++ code:

Mat A = Mat::eye(4, 3, CV_32F)*5;
// A will be set to [[5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0]]

Parameters:
mtx - matrix to initialize (not necessarily square).
s - value to assign to the diagonal elements.
See Also:
org.opencv.core.Core.setIdentity, Mat.setTo(org.opencv.core.Scalar), Mat.ones(int, int, int), Mat.zeros(int, int, int)

solve

public static boolean solve(Mat src1,
                            Mat src2,
                            Mat dst)

Solves one or more linear systems or least-squares problems.

The function solve solves a linear system or least-squares problem (the latter is possible with SVD or QR methods, or by specifying the flag DECOMP_NORMAL):

dst = argmin_X ||src1*X - src2||

If the DECOMP_LU or DECOMP_CHOLESKY method is used, the function returns true if src1 (or src1^T*src1) is non-singular and false otherwise; in the latter case, dst is not valid. Other methods find a pseudo-solution in the case of a singular left-hand side part.

Note: If you want to find a unity-norm solution of an under-defined singular system src1*dst=0, the function solve will not do the work. Use "SVD.solveZ" instead.
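
As a minimal Java sketch (assuming the native library is loaded), solving the 2x2 system 2x + y = 5, x + 3y = 10:

// Java code (illustrative sketch):

Mat A = new Mat(2, 2, CvType.CV_64F);
A.put(0, 0, 2, 1, 1, 3);
Mat b = new Mat(2, 1, CvType.CV_64F);
b.put(0, 0, 5, 10);
Mat x = new Mat();
boolean ok = Core.solve(A, b, x);
// ok == true; x is approximately [1; 3]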

Parameters:
src1 - input matrix on the left-hand side of the system.
src2 - input matrix on the right-hand side of the system.
dst - output solution.
See Also:
org.opencv.core.Core.solve, invert(org.opencv.core.Mat, org.opencv.core.Mat, int), eigen(org.opencv.core.Mat, boolean, org.opencv.core.Mat, org.opencv.core.Mat)

solve

public static boolean solve(Mat src1,
                            Mat src2,
                            Mat dst,
                            int flags)

Solves one or more linear systems or least-squares problems.

The function solve solves a linear system or least-squares problem (the latter is possible with SVD or QR methods, or by specifying the flag DECOMP_NORMAL):

dst = argmin_X ||src1*X - src2||

If the DECOMP_LU or DECOMP_CHOLESKY method is used, the function returns true if src1 (or src1^T*src1) is non-singular and false otherwise; in the latter case, dst is not valid. Other methods find a pseudo-solution in the case of a singular left-hand side part.

Note: If you want to find a unity-norm solution of an under-defined singular system src1*dst=0, the function solve will not do the work. Use "SVD.solveZ" instead.

Parameters:
src1 - input matrix on the left-hand side of the system.
src2 - input matrix on the right-hand side of the system.
dst - output solution.
flags - solution (matrix inversion) method.
  • DECOMP_LU Gaussian elimination with optimal pivot element chosen.
  • DECOMP_CHOLESKY Cholesky LL^T factorization; the matrix src1 must be symmetric and positive-definite.
  • DECOMP_EIG eigenvalue decomposition; the matrix src1 must be symmetric.
  • DECOMP_SVD singular value decomposition (SVD) method; the system can be over-defined and/or the matrix src1 can be singular.
  • DECOMP_QR QR factorization; the system can be over-defined and/or the matrix src1 can be singular.
  • DECOMP_NORMAL while all the previous flags are mutually exclusive, this flag can be used together with any of the previous; it means that the normal equations src1^T*src1*dst = src1^T*src2 are solved instead of the original system src1*dst = src2.
See Also:
org.opencv.core.Core.solve, invert(org.opencv.core.Mat, org.opencv.core.Mat, int), eigen(org.opencv.core.Mat, boolean, org.opencv.core.Mat, org.opencv.core.Mat)

solveCubic

public static int solveCubic(Mat coeffs,
                             Mat roots)

Finds the real roots of a cubic equation.

The function solveCubic finds the real roots of a cubic equation:

  • if coeffs is a 4-element vector:

coeffs[0] x^3 + coeffs[1] x^2 + coeffs[2] x + coeffs[3] = 0

  • if coeffs is a 3-element vector:

x^3 + coeffs[0] x^2 + coeffs[1] x + coeffs[2] = 0

The roots are stored in the roots array.
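
For example, a Java sketch (assuming the native library is loaded) for x^3 - 6x^2 + 11x - 6 = 0, whose roots are 1, 2 and 3:

// Java code (illustrative sketch):

Mat coeffs = new Mat(1, 4, CvType.CV_64F);
coeffs.put(0, 0, 1, -6, 11, -6);
Mat roots = new Mat();
int n = Core.solveCubic(coeffs, roots);
// n == 3; roots contains 1, 2 and 3 (in some order)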

Parameters:
coeffs - equation coefficients, an array of 3 or 4 elements.
roots - output array of real roots that has 1 or 3 elements.
See Also:
org.opencv.core.Core.solveCubic

solvePoly

public static double solvePoly(Mat coeffs,
                               Mat roots)

Finds the real or complex roots of a polynomial equation.

The function solvePoly finds real and complex roots of a polynomial equation:

coeffs[n] x^n + coeffs[n-1] x^(n-1) + ... + coeffs[1] x + coeffs[0] = 0
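
Note that coeffs[0] is the constant term and coeffs[n] the leading coefficient. A minimal Java sketch (assuming the native library is loaded) for x^2 - 1 = 0:

// Java code (illustrative sketch):

Mat coeffs = new Mat(3, 1, CvType.CV_64F);
coeffs.put(0, 0, -1, 0, 1);            // -1 + 0*x + 1*x^2
Mat roots = new Mat();
Core.solvePoly(coeffs, roots);
// roots holds the complex pairs (1, 0) and (-1, 0)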

Parameters:
coeffs - array of polynomial coefficients.
roots - output (complex) array of roots.
See Also:
org.opencv.core.Core.solvePoly

solvePoly

public static double solvePoly(Mat coeffs,
                               Mat roots,
                               int maxIters)

Finds the real or complex roots of a polynomial equation.

The function solvePoly finds real and complex roots of a polynomial equation:

coeffs[n] x^n + coeffs[n-1] x^(n-1) + ... + coeffs[1] x + coeffs[0] = 0

Parameters:
coeffs - array of polynomial coefficients.
roots - output (complex) array of roots.
maxIters - maximum number of iterations the algorithm does.
See Also:
org.opencv.core.Core.solvePoly

sort

public static void sort(Mat src,
                        Mat dst,
                        int flags)

Sorts each row or each column of a matrix.

The function sort sorts each matrix row or each matrix column in ascending or descending order, so you should pass two operation flags to get the desired behaviour. If you want to sort matrix rows or columns lexicographically, you can use the STL std::sort generic function with the proper comparison predicate.
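
For example, sorting a single row in ascending order might look like this in Java (a minimal sketch, assuming the native library is loaded):

// Java code (illustrative sketch):

Mat src = new Mat(1, 4, CvType.CV_32F);
src.put(0, 0, 3, 1, 4, 2);
Mat dst = new Mat();
Core.sort(src, dst, Core.SORT_EVERY_ROW + Core.SORT_ASCENDING);
// dst = [1, 2, 3, 4]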

Parameters:
src - input single-channel array.
dst - output array of the same size and type as src.
flags - operation flags, a combination of the following values:
  • CV_SORT_EVERY_ROW each matrix row is sorted independently.
  • CV_SORT_EVERY_COLUMN each matrix column is sorted independently; this flag and the previous one are mutually exclusive.
  • CV_SORT_ASCENDING each matrix row is sorted in the ascending order.
  • CV_SORT_DESCENDING each matrix row is sorted in the descending order; this flag and the previous one are also mutually exclusive.
See Also:
org.opencv.core.Core.sort, randShuffle(org.opencv.core.Mat, double), sortIdx(org.opencv.core.Mat, org.opencv.core.Mat, int)

sortIdx

public static void sortIdx(Mat src,
                           Mat dst,
                           int flags)

Sorts each row or each column of a matrix.

The function sortIdx sorts each matrix row or each matrix column in ascending or descending order, so you should pass two operation flags to get the desired behaviour. Instead of reordering the elements themselves, it stores the indices of the sorted elements in the output array. For example:

// C++ code:

Mat A = Mat::eye(3, 3, CV_32F), B;

sortIdx(A, B, CV_SORT_EVERY_ROW + CV_SORT_ASCENDING);

// B will probably contain

// (because of equal elements in A some permutations are possible):

// [[1, 2, 0], [0, 2, 1], [0, 1, 2]]
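
An equivalent call through the Java binding might look like this (a minimal sketch, assuming the native library is loaded):

// Java code (illustrative sketch):

Mat a = Mat.eye(3, 3, CvType.CV_32F);
Mat idx = new Mat();
Core.sortIdx(a, idx, Core.SORT_EVERY_ROW + Core.SORT_ASCENDING);
// idx holds, per row, the indices of the elements in ascending order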

Parameters:
src - input single-channel array.
dst - output integer array of the same size as src.
flags - operation flags that could be a combination of the following values:
  • CV_SORT_EVERY_ROW each matrix row is sorted independently.
  • CV_SORT_EVERY_COLUMN each matrix column is sorted independently; this flag and the previous one are mutually exclusive.
  • CV_SORT_ASCENDING each matrix row is sorted in the ascending order.
  • CV_SORT_DESCENDING each matrix row is sorted in the descending order; this flag and the previous one are also mutually exclusive.
See Also:
org.opencv.core.Core.sortIdx, sort(org.opencv.core.Mat, org.opencv.core.Mat, int), randShuffle(org.opencv.core.Mat, double)

split

public static void split(Mat m,
                         java.util.List<Mat> mv)

Divides a multi-channel array into several single-channel arrays.

The function split divides a multi-channel array into separate single-channel arrays:

mv[c](I) = src(I)_c

If you need to extract a single channel or do some other sophisticated channel permutation, use "mixChannels".
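
For example, splitting a 3-channel image into its planes in Java (a minimal sketch; the image size and type are illustrative only):

// Java code (illustrative sketch):

Mat bgr = new Mat(480, 640, CvType.CV_8UC3);
java.util.List<Mat> channels = new java.util.ArrayList<Mat>();
Core.split(bgr, channels);
// channels.size() == 3; each element is a 480x640 single-channel Mat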

Parameters:
m - input multi-channel array.
mv - output array or vector of arrays; in the first variant of the function the number of arrays must match src.channels(); the arrays themselves are reallocated, if needed.
See Also:
org.opencv.core.Core.split, merge(java.util.List, org.opencv.core.Mat), Imgproc.cvtColor(org.opencv.core.Mat, org.opencv.core.Mat, int, int), mixChannels(java.util.List, java.util.List, org.opencv.core.MatOfInt)

sqrt

public static void sqrt(Mat src,
                        Mat dst)

Calculates a square root of array elements.

The function sqrt calculates a square root of each input array element. In the case of multi-channel arrays, each channel is processed independently. The accuracy is approximately the same as that of the built-in std::sqrt.
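
A minimal Java sketch (the values are illustrative only):

// Java code (illustrative sketch):

Mat src = new Mat(1, 3, CvType.CV_32F);
src.put(0, 0, 4, 9, 16);
Mat dst = new Mat();
Core.sqrt(src, dst);
// dst = [2, 3, 4]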

Parameters:
src - input floating-point array.
dst - output array of the same size and type as src.
See Also:
org.opencv.core.Core.sqrt, pow(org.opencv.core.Mat, double, org.opencv.core.Mat), magnitude(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat)

subtract

public static void subtract(Mat src1,
                            Mat src2,
                            Mat dst)

Calculates the per-element difference between two arrays or between an array and a scalar.

The function subtract calculates:

  • Difference between two arrays, when both input arrays have the same size and the same number of channels:

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

  • Difference between an array and a scalar, when src2 is constructed from Scalar or has the same number of elements as src1.channels():

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

  • Difference between a scalar and an array, when src1 is constructed from Scalar or has the same number of elements as src2.channels():

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

  • The reverse difference between a scalar and an array in the case of SubRS:

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each channel is processed independently. The first function in the list above can be replaced with matrix expressions:

// C++ code:

dst = src1 - src2;

dst -= src1; // equivalent to subtract(dst, src1, dst);

The input arrays and the output array can all have the same or different depths. For example, you can subtract two 8-bit unsigned arrays and store the difference in a 16-bit signed array. The depth of the output array is determined by the dtype parameter. In the second and third cases above, as well as in the first case when src1.depth() == src2.depth(), dtype can be set to the default -1. In this case the output array will have the same depth as the input array, be it src1, src2 or both.

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result of an incorrect sign in the case of overflow.
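
A minimal Java sketch that also shows the saturation behaviour for 8-bit data (the values are illustrative only):

// Java code (illustrative sketch):

Mat a = new Mat(1, 3, CvType.CV_8U);
a.put(0, 0, 10, 20, 30);
Mat b = new Mat(1, 3, CvType.CV_8U);
b.put(0, 0, 5, 25, 10);
Mat dst = new Mat();
Core.subtract(a, b, dst);
// dst = [5, 0, 20]; 20 - 25 saturates to 0 for CV_8U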

Parameters:
src1 - first input array or a scalar.
src2 - second input array or a scalar.
dst - output array of the same size and the same number of channels as the input array.
See Also:
org.opencv.core.Core.subtract, addWeighted(org.opencv.core.Mat, double, org.opencv.core.Mat, double, double, org.opencv.core.Mat, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), scaleAdd(org.opencv.core.Mat, double, org.opencv.core.Mat, org.opencv.core.Mat), Mat.convertTo(org.opencv.core.Mat, int, double, double)

subtract

public static void subtract(Mat src1,
                            Mat src2,
                            Mat dst,
                            Mat mask)

Calculates the per-element difference between two arrays or between an array and a scalar.

The function subtract calculates:

  • Difference between two arrays, when both input arrays have the same size and the same number of channels:

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

  • Difference between an array and a scalar, when src2 is constructed from Scalar or has the same number of elements as src1.channels():

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

  • Difference between a scalar and an array, when src1 is constructed from Scalar or has the same number of elements as src2.channels():

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

  • The reverse difference between a scalar and an array in the case of SubRS:

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each channel is processed independently. The first function in the list above can be replaced with matrix expressions:

// C++ code:

dst = src1 - src2;

dst -= src1; // equivalent to subtract(dst, src1, dst);

The input arrays and the output array can all have the same or different depths. For example, you can subtract two 8-bit unsigned arrays and store the difference in a 16-bit signed array. The depth of the output array is determined by the dtype parameter. In the second and third cases above, as well as in the first case when src1.depth() == src2.depth(), dtype can be set to the default -1. In this case the output array will have the same depth as the input array, be it src1, src2 or both.

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result of an incorrect sign in the case of overflow.

Parameters:
src1 - first input array or a scalar.
src2 - second input array or a scalar.
dst - output array of the same size and the same number of channels as the input array.
mask - optional operation mask; this is an 8-bit single channel array that specifies elements of the output array to be changed.
See Also:
org.opencv.core.Core.subtract, addWeighted(org.opencv.core.Mat, double, org.opencv.core.Mat, double, double, org.opencv.core.Mat, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), scaleAdd(org.opencv.core.Mat, double, org.opencv.core.Mat, org.opencv.core.Mat), Mat.convertTo(org.opencv.core.Mat, int, double, double)

subtract

public static void subtract(Mat src1,
                            Mat src2,
                            Mat dst,
                            Mat mask,
                            int dtype)

Calculates the per-element difference between two arrays or between an array and a scalar.

The function subtract calculates:

  • Difference between two arrays, when both input arrays have the same size and the same number of channels:

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

  • Difference between an array and a scalar, when src2 is constructed from Scalar or has the same number of elements as src1.channels():

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

  • Difference between a scalar and an array, when src1 is constructed from Scalar or has the same number of elements as src2.channels():

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

  • The reverse difference between a scalar and an array in the case of SubRS:

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each channel is processed independently. The first function in the list above can be replaced with matrix expressions:

// C++ code:

dst = src1 - src2;

dst -= src1; // equivalent to subtract(dst, src1, dst);

The input arrays and the output array can all have the same or different depths. For example, you can subtract two 8-bit unsigned arrays and store the difference in a 16-bit signed array. The depth of the output array is determined by the dtype parameter. In the second and third cases above, as well as in the first case when src1.depth() == src2.depth(), dtype can be set to the default -1. In this case the output array will have the same depth as the input array, be it src1, src2 or both.

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result of an incorrect sign in the case of overflow.

Parameters:
src1 - first input array or a scalar.
src2 - second input array or a scalar.
dst - output array of the same size and the same number of channels as the input array.
mask - optional operation mask; this is an 8-bit single channel array that specifies elements of the output array to be changed.
dtype - optional depth of the output array (see the details below).
See Also:
org.opencv.core.Core.subtract, addWeighted(org.opencv.core.Mat, double, org.opencv.core.Mat, double, double, org.opencv.core.Mat, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), scaleAdd(org.opencv.core.Mat, double, org.opencv.core.Mat, org.opencv.core.Mat), Mat.convertTo(org.opencv.core.Mat, int, double, double)

subtract

public static void subtract(Mat src1,
                            Scalar src2,
                            Mat dst)

Calculates the per-element difference between two arrays or between an array and a scalar.

The function subtract calculates:

  • Difference between two arrays, when both input arrays have the same size and the same number of channels:

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

  • Difference between an array and a scalar, when src2 is constructed from Scalar or has the same number of elements as src1.channels():

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

  • Difference between a scalar and an array, when src1 is constructed from Scalar or has the same number of elements as src2.channels():

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

  • The reverse difference between a scalar and an array in the case of SubRS:

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each channel is processed independently. The first function in the list above can be replaced with matrix expressions:

// C++ code:

dst = src1 - src2;

dst -= src1; // equivalent to subtract(dst, src1, dst);

The input arrays and the output array can all have the same or different depths. For example, you can subtract two 8-bit unsigned arrays and store the difference in a 16-bit signed array. The depth of the output array is determined by the dtype parameter. In the second and third cases above, as well as in the first case when src1.depth() == src2.depth(), dtype can be set to the default -1. In this case the output array will have the same depth as the input array, be it src1, src2 or both.

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result of an incorrect sign in the case of overflow.

Parameters:
src1 - first input array or a scalar.
src2 - second input array or a scalar.
dst - output array of the same size and the same number of channels as the input array.
See Also:
org.opencv.core.Core.subtract, addWeighted(org.opencv.core.Mat, double, org.opencv.core.Mat, double, double, org.opencv.core.Mat, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), scaleAdd(org.opencv.core.Mat, double, org.opencv.core.Mat, org.opencv.core.Mat), Mat.convertTo(org.opencv.core.Mat, int, double, double)

subtract

public static void subtract(Mat src1,
                            Scalar src2,
                            Mat dst,
                            Mat mask)

Calculates the per-element difference between two arrays or between an array and a scalar.

The function subtract calculates:

  • Difference between two arrays, when both input arrays have the same size and the same number of channels:

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

  • Difference between an array and a scalar, when src2 is constructed from Scalar or has the same number of elements as src1.channels():

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

  • Difference between a scalar and an array, when src1 is constructed from Scalar or has the same number of elements as src2.channels():

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

  • The reverse difference between a scalar and an array in the case of SubRS:

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each channel is processed independently. The first function in the list above can be replaced with matrix expressions:

// C++ code:

dst = src1 - src2;

dst -= src1; // equivalent to subtract(dst, src1, dst);

The input arrays and the output array can all have the same or different depths. For example, you can subtract two 8-bit unsigned arrays and store the difference in a 16-bit signed array. The depth of the output array is determined by the dtype parameter. In the second and third cases above, as well as in the first case when src1.depth() == src2.depth(), dtype can be set to the default -1. In this case the output array will have the same depth as the input array, be it src1, src2 or both.

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result of an incorrect sign in the case of overflow.

Parameters:
src1 - first input array or a scalar.
src2 - second input array or a scalar.
dst - output array of the same size and the same number of channels as the input array.
mask - optional operation mask; this is an 8-bit single channel array that specifies elements of the output array to be changed.
See Also:
org.opencv.core.Core.subtract, addWeighted(org.opencv.core.Mat, double, org.opencv.core.Mat, double, double, org.opencv.core.Mat, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), scaleAdd(org.opencv.core.Mat, double, org.opencv.core.Mat, org.opencv.core.Mat), Mat.convertTo(org.opencv.core.Mat, int, double, double)

subtract

public static void subtract(Mat src1,
                            Scalar src2,
                            Mat dst,
                            Mat mask,
                            int dtype)

Calculates the per-element difference between two arrays or between an array and a scalar.

The function subtract calculates:

  • Difference between two arrays, when both input arrays have the same size and the same number of channels:

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

  • Difference between an array and a scalar, when src2 is constructed from Scalar or has the same number of elements as src1.channels():

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

  • Difference between a scalar and an array, when src1 is constructed from Scalar or has the same number of elements as src2.channels():

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

  • The reverse difference between a scalar and an array in the case of SubRS:

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each channel is processed independently. The first function in the list above can be replaced with matrix expressions:

// C++ code:

dst = src1 - src2;

dst -= src1; // equivalent to subtract(dst, src1, dst);

The input arrays and the output array can all have the same or different depths. For example, you can subtract two 8-bit unsigned arrays and store the difference in a 16-bit signed array. The depth of the output array is determined by the dtype parameter. In the second and third cases above, as well as in the first case when src1.depth() == src2.depth(), dtype can be set to the default -1. In this case the output array will have the same depth as the input array, be it src1, src2 or both.

Note: Saturation is not applied when the output array has the depth CV_32S. You may even get a result of an incorrect sign in the case of overflow.

Parameters:
src1 - first input array or a scalar.
src2 - second input array or a scalar.
dst - output array of the same size and the same number of channels as the input array.
mask - optional operation mask; this is an 8-bit single channel array that specifies elements of the output array to be changed.
dtype - optional depth of the output array (see the details below).
See Also:
org.opencv.core.Core.subtract, addWeighted(org.opencv.core.Mat, double, org.opencv.core.Mat, double, double, org.opencv.core.Mat, int), add(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, int), scaleAdd(org.opencv.core.Mat, double, org.opencv.core.Mat, org.opencv.core.Mat), Mat.convertTo(org.opencv.core.Mat, int, double, double)

sumElems

public static Scalar sumElems(Mat src)

Calculates the sum of array elements.

The function sum calculates and returns the sum of array elements, independently for each channel.
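
A minimal Java sketch (the image contents are illustrative only):

// Java code (illustrative sketch):

Mat img = new Mat(2, 2, CvType.CV_8UC3, new Scalar(1, 2, 3));
Scalar s = Core.sumElems(img);
// s = (4, 8, 12, 0): per-channel sums over the four pixels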

Parameters:
src - input array that must have from 1 to 4 channels.
See Also:
org.opencv.core.Core.sum, meanStdDev(org.opencv.core.Mat, org.opencv.core.MatOfDouble, org.opencv.core.MatOfDouble, org.opencv.core.Mat), reduce(org.opencv.core.Mat, org.opencv.core.Mat, int, int, int), minMaxLoc(org.opencv.core.Mat, org.opencv.core.Mat), countNonZero(org.opencv.core.Mat), norm(org.opencv.core.Mat, int, org.opencv.core.Mat), mean(org.opencv.core.Mat, org.opencv.core.Mat)

SVBackSubst

public static void SVBackSubst(Mat w,
                               Mat u,
                               Mat vt,
                               Mat rhs,
                               Mat dst)

SVDecomp

public static void SVDecomp(Mat src,
                            Mat w,
                            Mat u,
                            Mat vt)

SVDecomp

public static void SVDecomp(Mat src,
                            Mat w,
                            Mat u,
                            Mat vt,
                            int flags)

trace

public static Scalar trace(Mat mtx)

Returns the trace of a matrix.

The function trace returns the sum of the diagonal elements of the matrix mtx.

tr(mtx) = sum_i mtx(i,i)
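
A minimal Java sketch:

// Java code (illustrative sketch):

Mat m = Mat.eye(3, 3, CvType.CV_64F);
Scalar t = Core.trace(m);
// t.val[0] == 3.0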

Parameters:
mtx - input matrix.
See Also:
org.opencv.core.Core.trace

transform

public static void transform(Mat src,
                             Mat dst,
                             Mat m)

Performs the matrix transformation of every array element.

The function transform performs the matrix transformation of every element of the array src and stores the results in dst :

dst(I) = m * src(I)

(when m.cols=src.channels()), or

dst(I) = m * [ src(I); 1]

(when m.cols=src.channels()+1)

Every element of the N-channel array src is interpreted as an N-element vector that is transformed using the M x N or M x (N+1) matrix m into an M-element vector - the corresponding element of the output array dst.

The function may be used for geometrical transformation of N-dimensional points, arbitrary linear color space transformation (such as various kinds of RGB to YUV transforms), shuffling the image channels, and so forth.
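
For example, translating a set of 2D points by (10, 20) with a 2x3 affine matrix might look like this in Java (a minimal sketch; the points are illustrative only):

// Java code (illustrative sketch):

Mat points = new Mat(1, 4, CvType.CV_32FC2);
points.put(0, 0, 0, 0, 1, 1, 2, 2, 3, 3);   // (0,0), (1,1), (2,2), (3,3)
Mat m = new Mat(2, 3, CvType.CV_32F);
m.put(0, 0, 1, 0, 10,
            0, 1, 20);
Mat moved = new Mat();
Core.transform(points, moved, m);
// each point (x, y) becomes (x + 10, y + 20)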

Parameters:
src - input array that must have as many channels (1 to 4) as m.cols or m.cols-1.
dst - output array of the same size and depth as src; it has as many channels as m.rows.
m - transformation 2x2 or 2x3 floating-point matrix.
See Also:
org.opencv.core.Core.transform, Imgproc.warpAffine(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Size, int, int, org.opencv.core.Scalar), perspectiveTransform(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat), Imgproc.warpPerspective(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Size, int, int, org.opencv.core.Scalar), Imgproc.getAffineTransform(org.opencv.core.MatOfPoint2f, org.opencv.core.MatOfPoint2f), Video.estimateRigidTransform(org.opencv.core.Mat, org.opencv.core.Mat, boolean)

transpose

public static void transpose(Mat src,
                             Mat dst)

Transposes a matrix.

The function "transpose" transposes the matrix src :

dst(i,j) = src(j,i)

Note: No complex conjugation is done in the case of a complex matrix. It should be done separately if needed.
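
A minimal Java sketch:

// Java code (illustrative sketch):

Mat m = new Mat(2, 3, CvType.CV_32F);
Mat mt = new Mat();
Core.transpose(m, mt);
// mt.rows() == 3, mt.cols() == 2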

Parameters:
src - input array.
dst - output array of the same type as src.
See Also:
org.opencv.core.Core.transpose

vconcat

public static void vconcat(java.util.List<Mat> src,
                           Mat dst)
