OpenCV  4.10.0-dev
Open Source Computer Vision
cv::cuda::HostMem Class Reference

Class with reference counting wrapping special memory type allocation functions from CUDA. More...

#include <opencv2/core/cuda.hpp>

Public Types

enum  AllocType {
  PAGE_LOCKED = 1 ,
  SHARED = 2 ,
  WRITE_COMBINED = 4
}
 

Public Member Functions

 HostMem (const HostMem &m)
 
 HostMem (HostMem::AllocType alloc_type=HostMem::AllocType::PAGE_LOCKED)
 
 HostMem (InputArray arr, HostMem::AllocType alloc_type=HostMem::AllocType::PAGE_LOCKED)
 creates from host memory, copying the data
 
 HostMem (int rows, int cols, int type, HostMem::AllocType alloc_type=HostMem::AllocType::PAGE_LOCKED)
 
 HostMem (Size size, int type, HostMem::AllocType alloc_type=HostMem::AllocType::PAGE_LOCKED)
 
 ~HostMem ()
 
int channels () const
 
HostMem clone () const
 returns a deep copy of the matrix, i.e. the data is copied
 
void create (int rows, int cols, int type)
 allocates new matrix data unless the matrix already has the specified size and type.
 
void create (Size size, int type)
 
GpuMat createGpuMatHeader () const
 Maps CPU memory to GPU address space and creates the cuda::GpuMat header without reference counting for it.
 
Mat createMatHeader () const
 returns matrix header with disabled reference counting for HostMem data.
 
int depth () const
 
size_t elemSize () const
 
size_t elemSize1 () const
 
bool empty () const
 
bool isContinuous () const
 
HostMem & operator= (const HostMem &m)
 
void release ()
 decrements the reference counter and releases the memory if needed.
 
HostMem reshape (int cn, int rows=0) const
 
Size size () const
 
size_t step1 () const
 
void swap (HostMem &b)
 swaps with another HostMem object
 
int type () const
 

Static Public Member Functions

static MatAllocator * getAllocator (HostMem::AllocType alloc_type=HostMem::AllocType::PAGE_LOCKED)
 

Public Attributes

AllocType alloc_type
 
int cols
 
uchar * data
 
const uchar * dataend
 
uchar * datastart
 
int flags
 
int * refcount
 
int rows
 
size_t step
 

Detailed Description

Class with reference counting wrapping special memory type allocation functions from CUDA.

Its interface is also Mat-like but with additional memory type parameters.

Note
Allocation size of such memory types is usually limited. For more details, see the CUDA 2.2 Pinned Memory APIs document or the CUDA C Programming Guide.
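
A minimal sketch of typical use (assuming an OpenCV build with the CUDA modules; the sizes and types below are arbitrary): allocate a page-locked buffer, fill it through a Mat header, and upload it asynchronously on a stream.

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

int main()
{
    // Page-locked (pinned) host buffer; pinned memory is required for
    // truly asynchronous host <-> device transfers.
    cv::cuda::HostMem host(480, 640, CV_8UC3, cv::cuda::HostMem::PAGE_LOCKED);

    // Fill it through a regular Mat header (no copy is made).
    cv::Mat frame = host.createMatHeader();
    frame.setTo(cv::Scalar(0, 128, 255));

    // Asynchronous upload on a CUDA stream.
    cv::cuda::Stream stream;
    cv::cuda::GpuMat d_frame;
    d_frame.upload(host, stream);
    stream.waitForCompletion();
    return 0;
}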

Member Enumeration Documentation

◆ AllocType

Enumerator
PAGE_LOCKED 
page-locked (pinned) host memory, suitable for asynchronous host/device transfers (the default)
SHARED 
page-locked memory that is also mapped into the CUDA address space, enabling zero-copy access from the device
WRITE_COMBINED 
write-combined page-locked memory: fast for the CPU to write and the GPU to read, but slow for the CPU to read back

Constructor & Destructor Documentation

◆ HostMem() [1/5]

cv::cuda::HostMem::HostMem ( HostMem::AllocType  alloc_type = HostMem::AllocType::PAGE_LOCKED)
explicit
Python:
cv.cuda.HostMem([, alloc_type]) -> <cuda_HostMem object>
cv.cuda.HostMem(rows, cols, type[, alloc_type]) -> <cuda_HostMem object>
cv.cuda.HostMem(size, type[, alloc_type]) -> <cuda_HostMem object>
cv.cuda.HostMem(arr[, alloc_type]) -> <cuda_HostMem object>

◆ HostMem() [2/5]

cv::cuda::HostMem::HostMem ( const HostMem &  m)
Python:
cv.cuda.HostMem([, alloc_type]) -> <cuda_HostMem object>
cv.cuda.HostMem(rows, cols, type[, alloc_type]) -> <cuda_HostMem object>
cv.cuda.HostMem(size, type[, alloc_type]) -> <cuda_HostMem object>
cv.cuda.HostMem(arr[, alloc_type]) -> <cuda_HostMem object>

◆ HostMem() [3/5]

cv::cuda::HostMem::HostMem ( int  rows,
int  cols,
int  type,
HostMem::AllocType  alloc_type = HostMem::AllocType::PAGE_LOCKED 
)
Python:
cv.cuda.HostMem([, alloc_type]) -> <cuda_HostMem object>
cv.cuda.HostMem(rows, cols, type[, alloc_type]) -> <cuda_HostMem object>
cv.cuda.HostMem(size, type[, alloc_type]) -> <cuda_HostMem object>
cv.cuda.HostMem(arr[, alloc_type]) -> <cuda_HostMem object>

◆ HostMem() [4/5]

cv::cuda::HostMem::HostMem ( Size  size,
int  type,
HostMem::AllocType  alloc_type = HostMem::AllocType::PAGE_LOCKED 
)
Python:
cv.cuda.HostMem([, alloc_type]) -> <cuda_HostMem object>
cv.cuda.HostMem(rows, cols, type[, alloc_type]) -> <cuda_HostMem object>
cv.cuda.HostMem(size, type[, alloc_type]) -> <cuda_HostMem object>
cv.cuda.HostMem(arr[, alloc_type]) -> <cuda_HostMem object>

◆ HostMem() [5/5]

cv::cuda::HostMem::HostMem ( InputArray  arr,
HostMem::AllocType  alloc_type = HostMem::AllocType::PAGE_LOCKED 
)
explicit
Python:
cv.cuda.HostMem([, alloc_type]) -> <cuda_HostMem object>
cv.cuda.HostMem(rows, cols, type[, alloc_type]) -> <cuda_HostMem object>
cv.cuda.HostMem(size, type[, alloc_type]) -> <cuda_HostMem object>
cv.cuda.HostMem(arr[, alloc_type]) -> <cuda_HostMem object>

creates from host memory, copying the data
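
A short sketch of this constructor (assuming a CUDA-enabled build; the matrix contents are placeholders): an ordinary pageable Mat is copied into a page-locked buffer, which can then be uploaded efficiently.

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

int main()
{
    cv::Mat cpu(256, 256, CV_32FC1, cv::Scalar(1.f)); // ordinary pageable memory

    // Copies the data into page-locked memory (default alloc_type).
    cv::cuda::HostMem pinned(cpu);

    cv::cuda::GpuMat d_img;
    d_img.upload(pinned); // upload from the pinned copy
    return 0;
}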

◆ ~HostMem()

cv::cuda::HostMem::~HostMem ( )

Member Function Documentation

◆ channels()

int cv::cuda::HostMem::channels ( ) const
Python:
cv.cuda.HostMem.channels() -> retval

◆ clone()

HostMem cv::cuda::HostMem::clone ( ) const
Python:
cv.cuda.HostMem.clone() -> retval

returns a deep copy of the matrix, i.e. the data is copied

◆ create() [1/2]

void cv::cuda::HostMem::create ( int  rows,
int  cols,
int  type 
)
Python:
cv.cuda.HostMem.create(rows, cols, type) -> None

allocates new matrix data unless the matrix already has the specified size and type.
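
For illustration (a sketch; the sizes and types are arbitrary), repeated calls with matching parameters do not reallocate:

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

int main()
{
    cv::cuda::HostMem buf;              // empty, PAGE_LOCKED by default
    buf.create(480, 640, CV_8UC1);      // allocates pinned memory
    buf.create(480, 640, CV_8UC1);      // no-op: size and type already match
    buf.create(240, 320, CV_32FC1);     // releases the old buffer, allocates a new one
    return 0;
}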

◆ create() [2/2]

void cv::cuda::HostMem::create ( Size  size,
int  type 
)
Python:
cv.cuda.HostMem.create(rows, cols, type) -> None

◆ createGpuMatHeader()

GpuMat cv::cuda::HostMem::createGpuMatHeader ( ) const

Maps CPU memory to GPU address space and creates the cuda::GpuMat header without reference counting for it.

This can be done only if memory was allocated with the SHARED flag and if it is supported by the hardware. Laptops often share video and CPU memory, so address spaces can be mapped, which eliminates an extra copy.
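
A hedged sketch of zero-copy mapping (assuming a CUDA-enabled build and hardware that supports mapped host memory, checked here with cv::cuda::DeviceInfo::canMapHostMemory(); sizes and values are arbitrary):

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

int main()
{
    cv::cuda::DeviceInfo info;
    if (!info.canMapHostMemory())
        return 0;                       // zero-copy not supported on this device

    // Must be allocated with the SHARED flag to be mappable.
    cv::cuda::HostMem shared(480, 640, CV_8UC1, cv::cuda::HostMem::SHARED);

    // The GpuMat header refers to the same physical memory; no copy is made.
    cv::cuda::GpuMat d_view = shared.createGpuMatHeader();
    d_view.setTo(cv::Scalar(42));       // write on the GPU ...

    cv::Mat h_view = shared.createMatHeader();
    // ... and the same bytes are visible on the CPU through h_view.
    return 0;
}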

◆ createMatHeader()

Mat cv::cuda::HostMem::createMatHeader ( ) const
Python:
cv.cuda.HostMem.createMatHeader() -> retval

returns matrix header with disabled reference counting for HostMem data.
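
A brief sketch (arbitrary size and type): the returned Mat shares the HostMem buffer and does not own it, so the HostMem object must outlive the header.

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

int main()
{
    cv::cuda::HostMem pinned(2, 3, CV_32FC1);

    // Shares the buffer; reference counting is disabled for the header.
    cv::Mat header = pinned.createMatHeader();
    header.at<float>(0, 0) = 3.14f;
    return 0;
}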

◆ depth()

int cv::cuda::HostMem::depth ( ) const
Python:
cv.cuda.HostMem.depth() -> retval

◆ elemSize()

size_t cv::cuda::HostMem::elemSize ( ) const
Python:
cv.cuda.HostMem.elemSize() -> retval

◆ elemSize1()

size_t cv::cuda::HostMem::elemSize1 ( ) const
Python:
cv.cuda.HostMem.elemSize1() -> retval

◆ empty()

bool cv::cuda::HostMem::empty ( ) const
Python:
cv.cuda.HostMem.empty() -> retval

◆ getAllocator()

static MatAllocator * cv::cuda::HostMem::getAllocator ( HostMem::AllocType  alloc_type = HostMem::AllocType::PAGE_LOCKED)
static
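
One common use (a sketch, not the only one): install the returned allocator as the default cv::Mat allocator so that subsequent Mat allocations are backed by page-locked memory; the previous allocator is saved and restored here.

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

int main()
{
    // Remember the current allocator so it can be restored.
    cv::MatAllocator* prev = cv::Mat::getDefaultAllocator();

    // Ordinary Mats allocated from now on use pinned memory.
    cv::Mat::setDefaultAllocator(cv::cuda::HostMem::getAllocator(cv::cuda::HostMem::PAGE_LOCKED));
    cv::Mat frame(720, 1280, CV_8UC3);

    cv::Mat::setDefaultAllocator(prev); // restore; frame still frees correctly
    return 0;
}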

◆ isContinuous()

bool cv::cuda::HostMem::isContinuous ( ) const
Python:
cv.cuda.HostMem.isContinuous() -> retval

◆ operator=()

HostMem & cv::cuda::HostMem::operator= ( const HostMem &  m)

◆ release()

void cv::cuda::HostMem::release ( )

decrements the reference counter and releases the memory if needed.

◆ reshape()

HostMem cv::cuda::HostMem::reshape ( int  cn,
int  rows = 0 
) const
Python:
cv.cuda.HostMem.reshape(cn[, rows]) -> retval

creates an alternative HostMem header for the same data, with a different number of channels and/or a different number of rows
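
A small sketch (arbitrary shapes): the header is reinterpreted without copying, so the total number of elements must stay the same.

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

int main()
{
    cv::cuda::HostMem rgb(4, 6, CV_8UC3);       // 4 x 6, 3 channels

    cv::cuda::HostMem flat = rgb.reshape(1);    // 4 x 18, single channel
    cv::cuda::HostMem row  = rgb.reshape(3, 1); // 1 x 24, 3 channels
    return 0;
}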

◆ size()

Size cv::cuda::HostMem::size ( ) const
Python:
cv.cuda.HostMem.size() -> retval

◆ step1()

size_t cv::cuda::HostMem::step1 ( ) const
Python:
cv.cuda.HostMem.step1() -> retval

◆ swap()

void cv::cuda::HostMem::swap ( HostMem &  b)
Python:
cv.cuda.HostMem.swap(b) -> None

swaps with another HostMem object

◆ type()

int cv::cuda::HostMem::type ( ) const
Python:
cv.cuda.HostMem.type() -> retval

Member Data Documentation

◆ alloc_type

AllocType cv::cuda::HostMem::alloc_type

◆ cols

int cv::cuda::HostMem::cols

◆ data

uchar* cv::cuda::HostMem::data

◆ dataend

const uchar* cv::cuda::HostMem::dataend

◆ datastart

uchar* cv::cuda::HostMem::datastart

◆ flags

int cv::cuda::HostMem::flags

◆ refcount

int* cv::cuda::HostMem::refcount

◆ rows

int cv::cuda::HostMem::rows

◆ step

size_t cv::cuda::HostMem::step

The documentation for this class was generated from the following file:
opencv2/core/cuda.hpp