OpenCV 5.0.0-alpha
Open Source Computer Vision
cv::cuda::GpuMat Class Reference

Base storage class for GPU memory with reference counting. More...

#include <opencv2/core/cuda.hpp>

Classes

class  Allocator
 

Public Member Functions

 GpuMat (const GpuMat &m)
 copy constructor
 
 GpuMat (const GpuMat &m, Range rowRange, Range colRange)
 creates a GpuMat header for a part of the bigger matrix
 
 GpuMat (const GpuMat &m, Rect roi)
 
 GpuMat (GpuMat::Allocator *allocator=GpuMat::defaultAllocator())
 default constructor
 
 GpuMat (InputArray arr, GpuMat::Allocator *allocator=GpuMat::defaultAllocator())
 builds GpuMat from host memory (Blocking call)
 
 GpuMat (int rows, int cols, int type, GpuMat::Allocator *allocator=GpuMat::defaultAllocator())
 constructs GpuMat of the specified size and type
 
 GpuMat (int rows, int cols, int type, Scalar s, GpuMat::Allocator *allocator=GpuMat::defaultAllocator())
 constructs GpuMat and fills it with the specified value s
 
 GpuMat (int rows, int cols, int type, void *data, size_t step=Mat::AUTO_STEP)
 constructor for GpuMat headers pointing to user-allocated data
 
 GpuMat (Size size, int type, GpuMat::Allocator *allocator=GpuMat::defaultAllocator())
 
 GpuMat (Size size, int type, Scalar s, GpuMat::Allocator *allocator=GpuMat::defaultAllocator())
 
 GpuMat (Size size, int type, void *data, size_t step=Mat::AUTO_STEP)
 
 ~GpuMat ()
 destructor - calls release()
 
GpuMat & adjustROI (int dtop, int dbottom, int dleft, int dright)
 moves/resizes the current GpuMat ROI inside the parent GpuMat
 
void assignTo (GpuMat &m, int type=-1) const
 
int channels () const
 returns number of channels
 
GpuMat clone () const
 returns deep copy of the GpuMat, i.e. the data is copied
 
GpuMat col (int x) const
 returns a new GpuMat header for the specified column
 
GpuMat colRange (int startcol, int endcol) const
 ... for the specified column span
 
GpuMat colRange (Range r) const
 
void convertTo (GpuMat &dst, int rtype, double alpha, double beta, Stream &stream) const
 bindings overload which converts GpuMat to another datatype with scaling (Non-Blocking call)
 
void convertTo (GpuMat &dst, int rtype, double alpha=1.0, double beta=0.0) const
 bindings overload which converts GpuMat to another datatype with scaling (Blocking call)
 
void convertTo (GpuMat &dst, int rtype, Stream &stream) const
 bindings overload which converts GpuMat to another datatype (Non-Blocking call)
 
void convertTo (OutputArray dst, int rtype) const
 converts GpuMat to another datatype (Blocking call)
 
void convertTo (OutputArray dst, int rtype, double alpha, double beta, Stream &stream) const
 converts GpuMat to another datatype with scaling (Non-Blocking call)
 
void convertTo (OutputArray dst, int rtype, double alpha, double beta=0.0) const
 converts GpuMat to another datatype with scaling (Blocking call)
 
void convertTo (OutputArray dst, int rtype, double alpha, Stream &stream) const
 converts GpuMat to another datatype with scaling (Non-Blocking call)
 
void convertTo (OutputArray dst, int rtype, Stream &stream) const
 converts GpuMat to another datatype (Non-Blocking call)
 
void copyTo (GpuMat &dst) const
 bindings overload which copies the GpuMat content to device memory (Blocking call)
 
void copyTo (GpuMat &dst, GpuMat &mask) const
 bindings overload which copies those GpuMat elements to dst that are marked with non-zero mask elements (Blocking call)
 
void copyTo (GpuMat &dst, GpuMat &mask, Stream &stream) const
 bindings overload which copies those GpuMat elements to dst that are marked with non-zero mask elements (Non-Blocking call)
 
void copyTo (GpuMat &dst, Stream &stream) const
 bindings overload which copies the GpuMat content to device memory (Non-Blocking call)
 
void copyTo (OutputArray dst) const
 copies the GpuMat content to device memory (Blocking call)
 
void copyTo (OutputArray dst, InputArray mask) const
 copies those GpuMat elements to dst that are marked with non-zero mask elements (Blocking call)
 
void copyTo (OutputArray dst, InputArray mask, Stream &stream) const
 copies those GpuMat elements to dst that are marked with non-zero mask elements (Non-Blocking call)
 
void copyTo (OutputArray dst, Stream &stream) const
 copies the GpuMat content to device memory (Non-Blocking call)
 
void create (int rows, int cols, int type)
 allocates new GpuMat data unless the GpuMat already has specified size and type
 
void create (Size size, int type)
 
void * cudaPtr () const
 
int depth () const
 returns element depth
 
void download (OutputArray dst) const
 Performs data download from GpuMat (Blocking call)
 
void download (OutputArray dst, Stream &stream) const
 Performs data download from GpuMat (Non-Blocking call)
 
size_t elemSize () const
 returns element size in bytes
 
size_t elemSize1 () const
 returns the size of a single element channel in bytes
 
bool empty () const
 returns true if GpuMat data is NULL
 
bool isContinuous () const
 
void locateROI (Size &wholeSize, Point &ofs) const
 locates GpuMat header within a parent GpuMat
 
template<typename _Tp >
 operator PtrStep< _Tp > () const
 
template<typename _Tp >
 operator PtrStepSz< _Tp > () const
 
GpuMat operator() (Range rowRange, Range colRange) const
 extracts a rectangular sub-GpuMat (this is a generalized form of row, rowRange etc.)
 
GpuMat operator() (Rect roi) const
 
GpuMat & operator= (const GpuMat &m)
 assignment operators
 
uchar * ptr (int y=0)
 returns pointer to y-th row
 
template<typename _Tp >
_Tp * ptr (int y=0)
 template version of the above method
 
const uchar * ptr (int y=0) const
 
template<typename _Tp >
const _Tp * ptr (int y=0) const
 
void release ()
 decreases the reference counter; deallocates the data when the reference counter reaches 0
 
GpuMat reshape (int cn, int rows=0) const
 
GpuMat row (int y) const
 returns a new GpuMat header for the specified row
 
GpuMat rowRange (int startrow, int endrow) const
 ... for the specified row span
 
GpuMat rowRange (Range r) const
 
GpuMat & setTo (Scalar s)
 sets some of the GpuMat elements to s (Blocking call)
 
GpuMat & setTo (Scalar s, InputArray mask)
 sets some of the GpuMat elements to s, according to the mask (Blocking call)
 
GpuMat & setTo (Scalar s, InputArray mask, Stream &stream)
 sets some of the GpuMat elements to s, according to the mask (Non-Blocking call)
 
GpuMat & setTo (Scalar s, Stream &stream)
 sets some of the GpuMat elements to s (Non-Blocking call)
 
Size size () const
 returns GpuMat size : width == number of columns, height == number of rows
 
size_t step1 () const
 returns step/elemSize1()
 
void swap (GpuMat &mat)
 swaps with another GpuMat
 
int type () const
 returns element type
 
void updateContinuityFlag ()
 internal use method: updates the continuity flag
 
void upload (InputArray arr)
 Performs data upload to GpuMat (Blocking call)
 
void upload (InputArray arr, Stream &stream)
 Performs data upload to GpuMat (Non-Blocking call)
 

Static Public Member Functions

static GpuMat::Allocator * defaultAllocator ()
 default allocator
 
static GpuMat::Allocator * getStdAllocator ()
 
static void setDefaultAllocator (GpuMat::Allocator *allocator)
 

Public Attributes

Allocator * allocator
 allocator
 
int cols
 
uchar * data
 pointer to the data
 
const uchar * dataend
 
uchar * datastart
 helper fields used in locateROI and adjustROI
 
int flags
 
int * refcount
 
int rows
 the number of rows
 
size_t step
 the distance in bytes between successive rows; includes the gap, if any
 

Detailed Description

Base storage class for GPU memory with reference counting.

Its interface matches the Mat interface with the following limitations:

  • no arbitrary dimensions support (only 2D)
  • no functions that return references to their data (because references on GPU are not valid for CPU)
  • no expression templates technique support

Beware that the latter limitation may lead to overloaded matrix operators that cause memory allocations. The GpuMat class is convertible to cuda::PtrStepSz and cuda::PtrStep so it can be passed directly to the kernel.

Note
In contrast with Mat, in most cases GpuMat::isContinuous() == false. This means that rows are aligned to a size that depends on the hardware. A single-row GpuMat is always continuous.
It is not recommended to leave static or global GpuMat variables allocated, that is, to rely on their destructors. The destruction order of such variables and of the CUDA context is undefined, and the GPU memory release function returns an error if the CUDA context has been destroyed beforehand.

Some member functions are described as a "Blocking Call" while others are described as a "Non-Blocking Call". Blocking functions are synchronous with respect to the host: it is guaranteed that the GPU operation has finished when the function returns. Non-blocking functions are asynchronous with respect to the host: they may return before the GPU operation has finished.

Compared to their blocking counterparts, non-blocking functions accept Stream as an additional argument. If a non-default stream is passed, the GPU operation may overlap with operations in other streams.
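
The distinction is easiest to see side by side. Below is a minimal sketch (not taken from the OpenCV sources; variable names are illustrative) contrasting the blocking and the stream-based calling styles using only GpuMat's own methods:

    #include <opencv2/core.hpp>
    #include <opencv2/core/cuda.hpp>

    void blocking_vs_nonblocking(const cv::Mat& host)
    {
        // Blocking style: each call is synchronous with the host.
        cv::cuda::GpuMat d_img, d_float;
        d_img.upload(host);                    // copy has finished when this returns
        d_img.convertTo(d_float, CV_32F);      // conversion has finished when this returns
        cv::Mat result;
        d_float.download(result);              // result is ready here

        // Non-blocking style: work is queued on a stream and may overlap with host code.
        cv::cuda::Stream stream;
        d_img.upload(host, stream);
        d_img.convertTo(d_float, CV_32F, stream);
        d_float.download(result, stream);
        stream.waitForCompletion();            // synchronize before reading 'result'
    }

True overlap of the copies additionally requires page-locked host memory (see HostMem and the upload/download notes below).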

See also
Mat

Constructor & Destructor Documentation

◆ GpuMat() [1/11]

cv::cuda::GpuMat::GpuMat ( GpuMat::Allocator * allocator = GpuMat::defaultAllocator())
explicit
Python:
cv.cuda.GpuMat([, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, rowRange, colRange) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, roi) -> <cuda_GpuMat object>
cv.cuda.GpuMat(arr[, allocator]) -> <cuda_GpuMat object>

default constructor

◆ GpuMat() [2/11]

cv::cuda::GpuMat::GpuMat ( int rows,
int cols,
int type,
GpuMat::Allocator * allocator = GpuMat::defaultAllocator() )
Python:
cv.cuda.GpuMat([, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, rowRange, colRange) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, roi) -> <cuda_GpuMat object>
cv.cuda.GpuMat(arr[, allocator]) -> <cuda_GpuMat object>

constructs GpuMat of the specified size and type

◆ GpuMat() [3/11]

cv::cuda::GpuMat::GpuMat ( Size size,
int type,
GpuMat::Allocator * allocator = GpuMat::defaultAllocator() )
Python:
cv.cuda.GpuMat([, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, rowRange, colRange) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, roi) -> <cuda_GpuMat object>
cv.cuda.GpuMat(arr[, allocator]) -> <cuda_GpuMat object>

◆ GpuMat() [4/11]

cv::cuda::GpuMat::GpuMat ( int rows,
int cols,
int type,
Scalar s,
GpuMat::Allocator * allocator = GpuMat::defaultAllocator() )
Python:
cv.cuda.GpuMat([, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, rowRange, colRange) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, roi) -> <cuda_GpuMat object>
cv.cuda.GpuMat(arr[, allocator]) -> <cuda_GpuMat object>

constructs GpuMat and fills it with the specified value s

◆ GpuMat() [5/11]

cv::cuda::GpuMat::GpuMat ( Size size,
int type,
Scalar s,
GpuMat::Allocator * allocator = GpuMat::defaultAllocator() )
Python:
cv.cuda.GpuMat([, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, rowRange, colRange) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, roi) -> <cuda_GpuMat object>
cv.cuda.GpuMat(arr[, allocator]) -> <cuda_GpuMat object>

◆ GpuMat() [6/11]

cv::cuda::GpuMat::GpuMat ( const GpuMat & m)
Python:
cv.cuda.GpuMat([, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, rowRange, colRange) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, roi) -> <cuda_GpuMat object>
cv.cuda.GpuMat(arr[, allocator]) -> <cuda_GpuMat object>

copy constructor

◆ GpuMat() [7/11]

cv::cuda::GpuMat::GpuMat ( int rows,
int cols,
int type,
void * data,
size_t step = Mat::AUTO_STEP )
Python:
cv.cuda.GpuMat([, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, rowRange, colRange) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, roi) -> <cuda_GpuMat object>
cv.cuda.GpuMat(arr[, allocator]) -> <cuda_GpuMat object>

constructor for GpuMat headers pointing to user-allocated data
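
As an illustration, the sketch below (an assumption-laden example, not from the OpenCV sources; it requires linking against the CUDA runtime) wraps a pitched buffer obtained from the CUDA runtime. The resulting GpuMat is a header only, so refcount is NULL and release() never frees the user memory:

    #include <opencv2/core/cuda.hpp>
    #include <cuda_runtime.h>

    void wrap_device_buffer()
    {
        const int rows = 480, cols = 640;
        void*  dev_ptr = nullptr;
        size_t pitch   = 0;                    // step in bytes, chosen by the CUDA runtime

        // User-allocated, pitched device memory (1 byte per element for CV_8UC1).
        cudaMallocPitch(&dev_ptr, &pitch, cols * sizeof(unsigned char), rows);

        // Header only: no allocation, no reference counting.
        cv::cuda::GpuMat wrapped(rows, cols, CV_8UC1, dev_ptr, pitch);
        wrapped.setTo(cv::Scalar(0));          // operate on the user buffer

        // The caller still owns the memory; the GpuMat destructor will not free it.
        cudaFree(dev_ptr);
    }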

◆ GpuMat() [8/11]

cv::cuda::GpuMat::GpuMat ( Size size,
int type,
void * data,
size_t step = Mat::AUTO_STEP )
Python:
cv.cuda.GpuMat([, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, rowRange, colRange) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, roi) -> <cuda_GpuMat object>
cv.cuda.GpuMat(arr[, allocator]) -> <cuda_GpuMat object>

◆ GpuMat() [9/11]

cv::cuda::GpuMat::GpuMat ( const GpuMat & m,
Range rowRange,
Range colRange )
Python:
cv.cuda.GpuMat([, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, rowRange, colRange) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, roi) -> <cuda_GpuMat object>
cv.cuda.GpuMat(arr[, allocator]) -> <cuda_GpuMat object>

creates a GpuMat header for a part of the bigger matrix

◆ GpuMat() [10/11]

cv::cuda::GpuMat::GpuMat ( const GpuMat & m,
Rect roi )
Python:
cv.cuda.GpuMat([, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, rowRange, colRange) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, roi) -> <cuda_GpuMat object>
cv.cuda.GpuMat(arr[, allocator]) -> <cuda_GpuMat object>

◆ GpuMat() [11/11]

cv::cuda::GpuMat::GpuMat ( InputArray arr,
GpuMat::Allocator * allocator = GpuMat::defaultAllocator() )
explicit
Python:
cv.cuda.GpuMat([, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(rows, cols, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(size, type, s[, allocator]) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, rowRange, colRange) -> <cuda_GpuMat object>
cv.cuda.GpuMat(m, roi) -> <cuda_GpuMat object>
cv.cuda.GpuMat(arr[, allocator]) -> <cuda_GpuMat object>

builds GpuMat from host memory (Blocking call)
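
For example (a fragment, assuming <opencv2/core.hpp> and <opencv2/core/cuda.hpp> are included; values are illustrative), the constructor uploads the host matrix and returns only after the copy has finished:

    cv::Mat host(480, 640, CV_8UC3, cv::Scalar(0, 0, 255));
    cv::cuda::GpuMat device(host);   // blocking: device holds a copy of host when this returns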

◆ ~GpuMat()

cv::cuda::GpuMat::~GpuMat ( )

destructor - calls release()

Member Function Documentation

◆ adjustROI()

GpuMat & cv::cuda::GpuMat::adjustROI ( int dtop,
int dbottom,
int dleft,
int dright )
Python:
cv.cuda.GpuMat.adjustROI(dtop, dbottom, dleft, dright) -> retval

moves/resizes the current GpuMat ROI inside the parent GpuMat
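
A small fragment (assuming <opencv2/core/cuda.hpp> is included; sizes are illustrative) showing how an ROI header can be grown in place:

    cv::cuda::GpuMat d_img(480, 640, CV_8UC1);
    cv::cuda::GpuMat roi = d_img(cv::Rect(100, 100, 200, 200));
    roi.adjustROI(50, 50, 50, 50);   // grow by 50 px on each side -> 300 x 300 ROI in d_img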

◆ assignTo()

void cv::cuda::GpuMat::assignTo ( GpuMat & m,
int type = -1 ) const
Python:
cv.cuda.GpuMat.assignTo(m[, type]) -> None

◆ channels()

int cv::cuda::GpuMat::channels ( ) const
Python:
cv.cuda.GpuMat.channels() -> retval

returns number of channels

◆ clone()

GpuMat cv::cuda::GpuMat::clone ( ) const
Python:
cv.cuda.GpuMat.clone() -> retval

returns deep copy of the GpuMat, i.e. the data is copied

◆ col()

GpuMat cv::cuda::GpuMat::col ( int x) const
Python:
cv.cuda.GpuMat.col(x) -> retval

returns a new GpuMat header for the specified column

◆ colRange() [1/2]

GpuMat cv::cuda::GpuMat::colRange ( int startcol,
int endcol ) const
Python:
cv.cuda.GpuMat.colRange(startcol, endcol) -> retval
cv.cuda.GpuMat.colRange(r) -> retval

... for the specified column span

◆ colRange() [2/2]

GpuMat cv::cuda::GpuMat::colRange ( Range r) const
Python:
cv.cuda.GpuMat.colRange(startcol, endcol) -> retval
cv.cuda.GpuMat.colRange(r) -> retval

◆ convertTo() [1/8]

void cv::cuda::GpuMat::convertTo ( GpuMat & dst,
int rtype,
double alpha,
double beta,
Stream & stream ) const
inline
Python:
cv.cuda.GpuMat.convertTo(rtype, stream[, dst]) -> dst
cv.cuda.GpuMat.convertTo(rtype[, dst[, alpha[, beta]]]) -> dst
cv.cuda.GpuMat.convertTo(rtype, alpha, beta, stream[, dst]) -> dst

bindings overload which converts GpuMat to another datatype with scaling (Non-Blocking call)

◆ convertTo() [2/8]

void cv::cuda::GpuMat::convertTo ( GpuMat & dst,
int rtype,
double alpha = 1.0,
double beta = 0.0 ) const
inline
Python:
cv.cuda.GpuMat.convertTo(rtype, stream[, dst]) -> dst
cv.cuda.GpuMat.convertTo(rtype[, dst[, alpha[, beta]]]) -> dst
cv.cuda.GpuMat.convertTo(rtype, alpha, beta, stream[, dst]) -> dst

bindings overload which converts GpuMat to another datatype with scaling (Blocking call)

◆ convertTo() [3/8]

void cv::cuda::GpuMat::convertTo ( GpuMat & dst,
int rtype,
Stream & stream ) const
inline
Python:
cv.cuda.GpuMat.convertTo(rtype, stream[, dst]) -> dst
cv.cuda.GpuMat.convertTo(rtype[, dst[, alpha[, beta]]]) -> dst
cv.cuda.GpuMat.convertTo(rtype, alpha, beta, stream[, dst]) -> dst

bindings overload which converts GpuMat to another datatype (Non-Blocking call)

◆ convertTo() [4/8]

void cv::cuda::GpuMat::convertTo ( OutputArray dst,
int rtype ) const
Python:
cv.cuda.GpuMat.convertTo(rtype, stream[, dst]) -> dst
cv.cuda.GpuMat.convertTo(rtype[, dst[, alpha[, beta]]]) -> dst
cv.cuda.GpuMat.convertTo(rtype, alpha, beta, stream[, dst]) -> dst

converts GpuMat to another datatype (Blocking call)

◆ convertTo() [5/8]

void cv::cuda::GpuMat::convertTo ( OutputArray dst,
int rtype,
double alpha,
double beta,
Stream & stream ) const
Python:
cv.cuda.GpuMat.convertTo(rtype, stream[, dst]) -> dst
cv.cuda.GpuMat.convertTo(rtype[, dst[, alpha[, beta]]]) -> dst
cv.cuda.GpuMat.convertTo(rtype, alpha, beta, stream[, dst]) -> dst

converts GpuMat to another datatype with scaling (Non-Blocking call)

◆ convertTo() [6/8]

void cv::cuda::GpuMat::convertTo ( OutputArray dst,
int rtype,
double alpha,
double beta = 0.0 ) const
Python:
cv.cuda.GpuMat.convertTo(rtype, stream[, dst]) -> dst
cv.cuda.GpuMat.convertTo(rtype[, dst[, alpha[, beta]]]) -> dst
cv.cuda.GpuMat.convertTo(rtype, alpha, beta, stream[, dst]) -> dst

converts GpuMat to another datatype with scaling (Blocking call)
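
For instance (a fragment, assuming <opencv2/core/cuda.hpp> is included), 8-bit pixels can be converted to normalized 32-bit floats with dst = src*alpha + beta:

    cv::cuda::GpuMat d_u8(480, 640, CV_8UC1, cv::Scalar(128));
    cv::cuda::GpuMat d_f32;
    d_u8.convertTo(d_f32, CV_32F, 1.0 / 255.0, 0.0);   // blocking; values end up in [0, 1]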

◆ convertTo() [7/8]

void cv::cuda::GpuMat::convertTo ( OutputArray dst,
int rtype,
double alpha,
Stream & stream ) const
Python:
cv.cuda.GpuMat.convertTo(rtype, stream[, dst]) -> dst
cv.cuda.GpuMat.convertTo(rtype[, dst[, alpha[, beta]]]) -> dst
cv.cuda.GpuMat.convertTo(rtype, alpha, beta, stream[, dst]) -> dst

converts GpuMat to another datatype with scaling (Non-Blocking call)

◆ convertTo() [8/8]

void cv::cuda::GpuMat::convertTo ( OutputArray dst,
int rtype,
Stream & stream ) const
Python:
cv.cuda.GpuMat.convertTo(rtype, stream[, dst]) -> dst
cv.cuda.GpuMat.convertTo(rtype[, dst[, alpha[, beta]]]) -> dst
cv.cuda.GpuMat.convertTo(rtype, alpha, beta, stream[, dst]) -> dst

converts GpuMat to another datatype (Non-Blocking call)

◆ copyTo() [1/8]

void cv::cuda::GpuMat::copyTo ( GpuMat & dst) const
inline
Python:
cv.cuda.GpuMat.copyTo([, dst]) -> dst
cv.cuda.GpuMat.copyTo(stream[, dst]) -> dst
cv.cuda.GpuMat.copyTo(mask[, dst]) -> dst
cv.cuda.GpuMat.copyTo(mask, stream[, dst]) -> dst

bindings overload which copies the GpuMat content to device memory (Blocking call)

◆ copyTo() [2/8]

void cv::cuda::GpuMat::copyTo ( GpuMat & dst,
GpuMat & mask ) const
inline
Python:
cv.cuda.GpuMat.copyTo([, dst]) -> dst
cv.cuda.GpuMat.copyTo(stream[, dst]) -> dst
cv.cuda.GpuMat.copyTo(mask[, dst]) -> dst
cv.cuda.GpuMat.copyTo(mask, stream[, dst]) -> dst

bindings overload which copies those GpuMat elements to dst that are marked with non-zero mask elements (Blocking call)

◆ copyTo() [3/8]

void cv::cuda::GpuMat::copyTo ( GpuMat & dst,
GpuMat & mask,
Stream & stream ) const
inline
Python:
cv.cuda.GpuMat.copyTo([, dst]) -> dst
cv.cuda.GpuMat.copyTo(stream[, dst]) -> dst
cv.cuda.GpuMat.copyTo(mask[, dst]) -> dst
cv.cuda.GpuMat.copyTo(mask, stream[, dst]) -> dst

bindings overload which copies those GpuMat elements to dst that are marked with non-zero mask elements (Non-Blocking call)

◆ copyTo() [4/8]

void cv::cuda::GpuMat::copyTo ( GpuMat & dst,
Stream & stream ) const
inline
Python:
cv.cuda.GpuMat.copyTo([, dst]) -> dst
cv.cuda.GpuMat.copyTo(stream[, dst]) -> dst
cv.cuda.GpuMat.copyTo(mask[, dst]) -> dst
cv.cuda.GpuMat.copyTo(mask, stream[, dst]) -> dst

bindings overload which copies the GpuMat content to device memory (Non-Blocking call)

◆ copyTo() [5/8]

void cv::cuda::GpuMat::copyTo ( OutputArray dst) const
Python:
cv.cuda.GpuMat.copyTo([, dst]) -> dst
cv.cuda.GpuMat.copyTo(stream[, dst]) -> dst
cv.cuda.GpuMat.copyTo(mask[, dst]) -> dst
cv.cuda.GpuMat.copyTo(mask, stream[, dst]) -> dst

copies the GpuMat content to device memory (Blocking call)

◆ copyTo() [6/8]

void cv::cuda::GpuMat::copyTo ( OutputArray dst,
InputArray mask ) const
Python:
cv.cuda.GpuMat.copyTo([, dst]) -> dst
cv.cuda.GpuMat.copyTo(stream[, dst]) -> dst
cv.cuda.GpuMat.copyTo(mask[, dst]) -> dst
cv.cuda.GpuMat.copyTo(mask, stream[, dst]) -> dst

copies those GpuMat elements to dst that are marked with non-zero mask elements (Blocking call)

◆ copyTo() [7/8]

void cv::cuda::GpuMat::copyTo ( OutputArray dst,
InputArray mask,
Stream & stream ) const
Python:
cv.cuda.GpuMat.copyTo([, dst]) -> dst
cv.cuda.GpuMat.copyTo(stream[, dst]) -> dst
cv.cuda.GpuMat.copyTo(mask[, dst]) -> dst
cv.cuda.GpuMat.copyTo(mask, stream[, dst]) -> dst

copies those GpuMat elements to dst that are marked with non-zero mask elements (Non-Blocking call)

◆ copyTo() [8/8]

void cv::cuda::GpuMat::copyTo ( OutputArray dst,
Stream & stream ) const
Python:
cv.cuda.GpuMat.copyTo([, dst]) -> dst
cv.cuda.GpuMat.copyTo(stream[, dst]) -> dst
cv.cuda.GpuMat.copyTo(mask[, dst]) -> dst
cv.cuda.GpuMat.copyTo(mask, stream[, dst]) -> dst

copies the GpuMat content to device memory (Non-Blocking call)

◆ create() [1/2]

void cv::cuda::GpuMat::create ( int rows,
int cols,
int type )
Python:
cv.cuda.GpuMat.create(rows, cols, type) -> None
cv.cuda.GpuMat.create(size, type) -> None

allocates new GpuMat data unless the GpuMat already has specified size and type
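
Because create() is a no-op when the size and type already match, reusing one GpuMat across iterations avoids repeated device allocations, as in this sketch (assuming <opencv2/core/cuda.hpp> is included):

    cv::cuda::GpuMat buffer;
    for (int i = 0; i < 100; ++i)
    {
        buffer.create(480, 640, CV_8UC1);    // allocates on the first iteration only
        buffer.setTo(cv::Scalar(i % 256));
    }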

◆ create() [2/2]

void cv::cuda::GpuMat::create ( Size size,
int type )
Python:
cv.cuda.GpuMat.create(rows, cols, type) -> None
cv.cuda.GpuMat.create(size, type) -> None

◆ cudaPtr()

void * cv::cuda::GpuMat::cudaPtr ( ) const
Python:
cv.cuda.GpuMat.cudaPtr() -> retval

◆ defaultAllocator()

static GpuMat::Allocator * cv::cuda::GpuMat::defaultAllocator ( )
static
Python:
cv.cuda.GpuMat.defaultAllocator() -> retval
cv.cuda.GpuMat_defaultAllocator() -> retval

default allocator

◆ depth()

int cv::cuda::GpuMat::depth ( ) const
Python:
cv.cuda.GpuMat.depth() -> retval

returns element depth

◆ download() [1/2]

void cv::cuda::GpuMat::download ( OutputArray dst) const
Python:
cv.cuda.GpuMat.download([, dst]) -> dst
cv.cuda.GpuMat.download(stream[, dst]) -> dst

Performs data download from GpuMat (Blocking call)

This function copies data from device memory to host memory. Because this is a blocking call, it is guaranteed that the copy operation has finished when the function returns.

◆ download() [2/2]

void cv::cuda::GpuMat::download ( OutputArray dst,
Stream & stream ) const
Python:
cv.cuda.GpuMat.download([, dst]) -> dst
cv.cuda.GpuMat.download(stream[, dst]) -> dst

Performs data download from GpuMat (Non-Blocking call)

This function copies data from device memory to host memory. Because this is a non-blocking call, the function may return before the copy operation has finished.

The copy operation may be overlapped with operations in other non-default streams if stream is not the default stream and dst is a HostMem allocated with the HostMem::PAGE_LOCKED option.
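
A minimal sketch of an asynchronous download into page-locked host memory (assuming <opencv2/core/cuda.hpp> is included; names and sizes are illustrative):

    cv::cuda::GpuMat d_img(480, 640, CV_8UC1, cv::Scalar(42));
    cv::cuda::HostMem h_pinned(480, 640, CV_8UC1, cv::cuda::HostMem::PAGE_LOCKED);
    cv::cuda::Stream stream;

    d_img.download(h_pinned, stream);     // may return before the copy completes
    // ... other host work or GPU work in other streams can overlap here ...
    stream.waitForCompletion();           // now it is safe to read the host data
    cv::Mat h_view = h_pinned.createMatHeader();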

◆ elemSize()

size_t cv::cuda::GpuMat::elemSize ( ) const
Python:
cv.cuda.GpuMat.elemSize() -> retval

returns element size in bytes

◆ elemSize1()

size_t cv::cuda::GpuMat::elemSize1 ( ) const
Python:
cv.cuda.GpuMat.elemSize1() -> retval

returns the size of a single element channel in bytes

◆ empty()

bool cv::cuda::GpuMat::empty ( ) const
Python:
cv.cuda.GpuMat.empty() -> retval

returns true if GpuMat data is NULL

◆ getStdAllocator()

static GpuMat::Allocator * cv::cuda::GpuMat::getStdAllocator ( )
static
Python:
cv.cuda.GpuMat.getStdAllocator() -> retval
cv.cuda.GpuMat_getStdAllocator() -> retval

◆ isContinuous()

bool cv::cuda::GpuMat::isContinuous ( ) const
Python:
cv.cuda.GpuMat.isContinuous() -> retval

returns true iff the GpuMat data is continuous (i.e. when there are no gaps between successive rows)
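
To make the row padding visible, a fragment (assuming <opencv2/core/cuda.hpp> is included):

    cv::cuda::GpuMat d_img(480, 640, CV_8UC3);
    size_t payload = d_img.cols * d_img.elemSize();  // bytes of pixel data per row
    // Rows are usually padded for alignment, so typically d_img.step >= payload
    // and d_img.isContinuous() == false for matrices with more than one row.
    bool contiguous = d_img.isContinuous();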

◆ locateROI()

void cv::cuda::GpuMat::locateROI ( Size & wholeSize,
Point & ofs ) const
Python:
cv.cuda.GpuMat.locateROI(wholeSize, ofs) -> None

locates GpuMat header within a parent GpuMat

◆ operator PtrStep< _Tp >()

template<typename _Tp >
cv::cuda::GpuMat::operator PtrStep< _Tp > ( ) const

◆ operator PtrStepSz< _Tp >()

template<typename _Tp >
cv::cuda::GpuMat::operator PtrStepSz< _Tp > ( ) const

◆ operator()() [1/2]

GpuMat cv::cuda::GpuMat::operator() ( Range rowRange,
Range colRange ) const

extracts a rectangular sub-GpuMat (this is a generalized form of row, rowRange etc.)
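
Both sub-matrix forms return headers that share memory with the parent; a sketch (assuming <opencv2/core/cuda.hpp> is included; sizes are illustrative):

    cv::cuda::GpuMat d_img(480, 640, CV_8UC1, cv::Scalar(0));

    cv::cuda::GpuMat d_center = d_img(cv::Rect(160, 120, 320, 240));
    cv::cuda::GpuMat d_top    = d_img(cv::Range(0, 240), cv::Range::all());

    d_center.setTo(cv::Scalar(255));      // modifies the corresponding region of d_img

    cv::Size wholeSize; cv::Point ofs;
    d_center.locateROI(wholeSize, ofs);   // wholeSize == 640x480, ofs == (160, 120)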

◆ operator()() [2/2]

GpuMat cv::cuda::GpuMat::operator() ( Rect roi) const

◆ operator=()

GpuMat & cv::cuda::GpuMat::operator= ( const GpuMat & m)

assignment operators

◆ ptr() [1/4]

uchar * cv::cuda::GpuMat::ptr ( int y = 0)

returns pointer to y-th row

◆ ptr() [2/4]

template<typename _Tp >
_Tp * cv::cuda::GpuMat::ptr ( int y = 0)

template version of the above method

◆ ptr() [3/4]

const uchar * cv::cuda::GpuMat::ptr ( int y = 0) const

◆ ptr() [4/4]

template<typename _Tp >
const _Tp * cv::cuda::GpuMat::ptr ( int y = 0) const

◆ release()

void cv::cuda::GpuMat::release ( )
Python:
cv.cuda.GpuMat.release() -> None

decreases the reference counter; deallocates the data when the reference counter reaches 0

◆ reshape()

GpuMat cv::cuda::GpuMat::reshape ( int cn,
int rows = 0 ) const
Python:
cv.cuda.GpuMat.reshape(cn[, rows]) -> retval

creates an alternative GpuMat header for the same data, with a different number of channels and/or a different number of rows
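
For example (a fragment, assuming <opencv2/core/cuda.hpp> is included), a 3-channel image can be viewed as a single-channel matrix three times as wide; changing the number of rows additionally requires continuous data:

    cv::cuda::GpuMat d_bgr(100, 100, CV_8UC3, cv::Scalar(1, 2, 3));
    cv::cuda::GpuMat flat = d_bgr.reshape(1);   // 100 x 300, CV_8UC1, same device data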

◆ row()

GpuMat cv::cuda::GpuMat::row ( int y) const
Python:
cv.cuda.GpuMat.row(y) -> retval

returns a new GpuMat header for the specified row

◆ rowRange() [1/2]

GpuMat cv::cuda::GpuMat::rowRange ( int startrow,
int endrow ) const
Python:
cv.cuda.GpuMat.rowRange(startrow, endrow) -> retval
cv.cuda.GpuMat.rowRange(r) -> retval

... for the specified row span

◆ rowRange() [2/2]

GpuMat cv::cuda::GpuMat::rowRange ( Range r) const
Python:
cv.cuda.GpuMat.rowRange(startrow, endrow) -> retval
cv.cuda.GpuMat.rowRange(r) -> retval

◆ setDefaultAllocator()

static void cv::cuda::GpuMat::setDefaultAllocator ( GpuMat::Allocator * allocator)
static
Python:
cv.cuda.GpuMat.setDefaultAllocator(allocator) -> None
cv.cuda.GpuMat_setDefaultAllocator(allocator) -> None

◆ setTo() [1/4]

GpuMat & cv::cuda::GpuMat::setTo ( Scalar s)
Python:
cv.cuda.GpuMat.setTo(s) -> retval
cv.cuda.GpuMat.setTo(s, stream) -> retval
cv.cuda.GpuMat.setTo(s, mask) -> retval
cv.cuda.GpuMat.setTo(s, mask, stream) -> retval

sets some of the GpuMat elements to s (Blocking call)

◆ setTo() [2/4]

GpuMat & cv::cuda::GpuMat::setTo ( Scalar s,
InputArray mask )
Python:
cv.cuda.GpuMat.setTo(s) -> retval
cv.cuda.GpuMat.setTo(s, stream) -> retval
cv.cuda.GpuMat.setTo(s, mask) -> retval
cv.cuda.GpuMat.setTo(s, mask, stream) -> retval

sets some of the GpuMat elements to s, according to the mask (Blocking call)
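
A fragment (assuming <opencv2/core/cuda.hpp> is included) setting only the pixels selected by a mask:

    cv::cuda::GpuMat d_img(480, 640, CV_8UC1, cv::Scalar(0));
    cv::cuda::GpuMat d_mask(480, 640, CV_8UC1, cv::Scalar(0));
    d_mask(cv::Rect(0, 0, 320, 240)).setTo(cv::Scalar(255));  // mark the top-left quadrant

    d_img.setTo(cv::Scalar(7), d_mask);   // only pixels with a non-zero mask value are set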

◆ setTo() [3/4]

GpuMat & cv::cuda::GpuMat::setTo ( Scalar s,
InputArray mask,
Stream & stream )
Python:
cv.cuda.GpuMat.setTo(s) -> retval
cv.cuda.GpuMat.setTo(s, stream) -> retval
cv.cuda.GpuMat.setTo(s, mask) -> retval
cv.cuda.GpuMat.setTo(s, mask, stream) -> retval

sets some of the GpuMat elements to s, according to the mask (Non-Blocking call)

◆ setTo() [4/4]

GpuMat & cv::cuda::GpuMat::setTo ( Scalar s,
Stream & stream )
Python:
cv.cuda.GpuMat.setTo(s) -> retval
cv.cuda.GpuMat.setTo(s, stream) -> retval
cv.cuda.GpuMat.setTo(s, mask) -> retval
cv.cuda.GpuMat.setTo(s, mask, stream) -> retval

sets some of the GpuMat elements to s (Non-Blocking call)

◆ size()

Size cv::cuda::GpuMat::size ( ) const
Python:
cv.cuda.GpuMat.size() -> retval

returns GpuMat size : width == number of columns, height == number of rows

◆ step1()

size_t cv::cuda::GpuMat::step1 ( ) const
Python:
cv.cuda.GpuMat.step1() -> retval

returns step/elemSize1()

◆ swap()

void cv::cuda::GpuMat::swap ( GpuMat & mat)
Python:
cv.cuda.GpuMat.swap(mat) -> None

swaps with another GpuMat

◆ type()

int cv::cuda::GpuMat::type ( ) const
Python:
cv.cuda.GpuMat.type() -> retval

returns element type

◆ updateContinuityFlag()

void cv::cuda::GpuMat::updateContinuityFlag ( )
Python:
cv.cuda.GpuMat.updateContinuityFlag() -> None

internal use method: updates the continuity flag

◆ upload() [1/2]

void cv::cuda::GpuMat::upload ( InputArray arr)
Python:
cv.cuda.GpuMat.upload(arr) -> None
cv.cuda.GpuMat.upload(arr, stream) -> None

Performs data upload to GpuMat (Blocking call)

This function copies data from host memory to device memory. Because this is a blocking call, it is guaranteed that the copy operation has finished when the function returns.

◆ upload() [2/2]

void cv::cuda::GpuMat::upload ( InputArray arr,
Stream & stream )
Python:
cv.cuda.GpuMat.upload(arr) -> None
cv.cuda.GpuMat.upload(arr, stream) -> None

Performs data upload to GpuMat (Non-Blocking call)

This function copies data from host memory to device memory. Because this is a non-blocking call, the function may return before the copy operation has finished.

The copy operation may be overlapped with operations in other non-default streams if stream is not the default stream and arr is a HostMem allocated with the HostMem::PAGE_LOCKED option.
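
A minimal sketch of an asynchronous upload from page-locked host memory (assuming <opencv2/core/cuda.hpp> is included; names and sizes are illustrative):

    cv::cuda::HostMem h_src(480, 640, CV_8UC1, cv::cuda::HostMem::PAGE_LOCKED);
    h_src.createMatHeader().setTo(cv::Scalar(9));   // fill on the host

    cv::cuda::Stream stream;
    cv::cuda::GpuMat d_img;
    d_img.upload(h_src, stream);      // may return before the copy completes
    stream.waitForCompletion();       // d_img holds the data after synchronization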

Member Data Documentation

◆ allocator

Allocator* cv::cuda::GpuMat::allocator

allocator

◆ cols

int cv::cuda::GpuMat::cols

◆ data

uchar* cv::cuda::GpuMat::data

pointer to the data

◆ dataend

const uchar* cv::cuda::GpuMat::dataend

◆ datastart

uchar* cv::cuda::GpuMat::datastart

helper fields used in locateROI and adjustROI

◆ flags

int cv::cuda::GpuMat::flags

includes several bit-fields:

  • the magic signature
  • continuity flag
  • depth
  • number of channels

◆ refcount

int* cv::cuda::GpuMat::refcount

pointer to the reference counter; when GpuMat points to user-allocated data, the pointer is NULL

◆ rows

int cv::cuda::GpuMat::rows

the number of rows

◆ step

size_t cv::cuda::GpuMat::step

the distance in bytes between successive rows; includes the gap, if any


The documentation for this class was generated from the following file: opencv2/core/cuda.hpp