How to Use Background Subtraction Methods

Goals

In this tutorial you will learn how to:

  1. Read data from videos by using cv::VideoCapture or image sequences by using cv::imread;
  2. Create and update the background model by using cv::BackgroundSubtractor class;
  3. Get and show the foreground mask by using cv::imshow;
  4. Save the output by using cv::imwrite to quantitatively evaluate the results.
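
As a rough preview, the four steps above fit in a very small pipeline. The sketch below is a minimal, self-contained variant of the full program presented in the next section; "input.avi" and the mask_<n>.png output names are placeholders chosen for illustration.

//a minimal sketch of the tutorial pipeline (placeholder file names)
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/video.hpp>
#include <opencv2/videoio.hpp>
#include <cstdlib>
#include <sstream>
using namespace cv;
int main()
{
    VideoCapture capture("input.avi");                  //1. read data from a video
    if(!capture.isOpened()) return EXIT_FAILURE;
    Ptr<BackgroundSubtractor> pMOG2 = createBackgroundSubtractorMOG2();
    Mat frame, fgMask;
    for(int n = 0; capture.read(frame); ++n) {
        pMOG2->apply(frame, fgMask);                    //2. update the model and get the mask
        imshow("Frame", frame);                         //3. show the input and the mask
        imshow("FG Mask MOG 2", fgMask);
        std::ostringstream name;                        //4. save the mask for later evaluation
        name << "mask_" << n << ".png";
        imwrite(name.str(), fgMask);
        if((char)waitKey(30) == 'q') break;
    }
    return EXIT_SUCCESS;
}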

Code

Below you can find the source code. We will let the user choose whether to process a video file or a sequence of images.

The cv::BackgroundSubtractorMOG2 method is used to generate the foreground mask. A second implementation, cv::bgsegm::BackgroundSubtractorMOG (available in the opencv_contrib bgsegm module), can be used in exactly the same way.

The results as well as the input data are shown on the screen. The source file can be downloaded here.

//opencv
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/video.hpp>
//C
#include <stdio.h>
#include <string.h>
//C++
#include <iostream>
#include <sstream>
using namespace cv;
using namespace std;
// Global variables
Mat frame; //current frame
Mat fgMaskMOG2; //fg mask generated by MOG2 method
Ptr<BackgroundSubtractor> pMOG2; //MOG2 Background subtractor
int keyboard; //input from keyboard
// Function headers
void help();
void processVideo(char* videoFilename);
void processImages(char* firstFrameFilename);
void help()
{
    cout
    << "--------------------------------------------------------------------------" << endl
    << "This program shows how to use background subtraction methods provided by " << endl
    << " OpenCV. You can process both videos (-vid) and images (-img)." << endl
    << endl
    << "Usage:" << endl
    << "./bs {-vid <video filename>|-img <image filename>}" << endl
    << "for example: ./bs -vid video.avi" << endl
    << "or: ./bs -img /data/images/1.png" << endl
    << "--------------------------------------------------------------------------" << endl
    << endl;
}
int main(int argc, char* argv[])
{
    //print help information
    help();
    //check for the input parameter correctness
    if(argc != 3) {
        cerr << "Incorrect input list" << endl;
        cerr << "exiting..." << endl;
        return EXIT_FAILURE;
    }
    //create GUI windows
    namedWindow("Frame");
    namedWindow("FG Mask MOG 2");
    //create Background Subtractor objects
    pMOG2 = createBackgroundSubtractorMOG2(); //MOG2 approach
    if(strcmp(argv[1], "-vid") == 0) {
        //input data coming from a video
        processVideo(argv[2]);
    }
    else if(strcmp(argv[1], "-img") == 0) {
        //input data coming from a sequence of images
        processImages(argv[2]);
    }
    else {
        //error in reading input parameters
        cerr << "Please, check the input parameters." << endl;
        cerr << "Exiting..." << endl;
        return EXIT_FAILURE;
    }
    //destroy GUI windows
    destroyAllWindows();
    return EXIT_SUCCESS;
}
void processVideo(char* videoFilename) {
    //create the capture object
    VideoCapture capture(videoFilename);
    if(!capture.isOpened()){
        //error in opening the video input
        cerr << "Unable to open video file: " << videoFilename << endl;
        exit(EXIT_FAILURE);
    }
    //read input data. ESC or 'q' for quitting
    while( (char)keyboard != 'q' && (char)keyboard != 27 ){
        //read the current frame
        if(!capture.read(frame)) {
            cerr << "Unable to read next frame." << endl;
            cerr << "Exiting..." << endl;
            exit(EXIT_FAILURE);
        }
        //update the background model
        pMOG2->apply(frame, fgMaskMOG2);
        //get the frame number and write it on the current frame
        stringstream ss;
        rectangle(frame, cv::Point(10, 2), cv::Point(100,20),
                  cv::Scalar(255,255,255), -1);
        ss << capture.get(CAP_PROP_POS_FRAMES);
        string frameNumberString = ss.str();
        putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
                FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0,0,0));
        //show the current frame and the fg masks
        imshow("Frame", frame);
        imshow("FG Mask MOG 2", fgMaskMOG2);
        //get the input from the keyboard
        keyboard = waitKey( 30 );
    }
    //delete capture object
    capture.release();
}
void processImages(char* firstFrameFilename) {
    //read the first file of the sequence
    frame = imread(firstFrameFilename);
    if(frame.empty()){
        //error in opening the first image
        cerr << "Unable to open first image frame: " << firstFrameFilename << endl;
        exit(EXIT_FAILURE);
    }
    //current image filename
    string fn(firstFrameFilename);
    //read input data. ESC or 'q' for quitting
    while( (char)keyboard != 'q' && (char)keyboard != 27 ){
        //update the background model
        pMOG2->apply(frame, fgMaskMOG2);
        //get the frame number and write it on the current frame
        size_t index = fn.find_last_of("/");
        if(index == string::npos) {
            index = fn.find_last_of("\\");
        }
        size_t index2 = fn.find_last_of(".");
        string prefix = fn.substr(0,index+1);
        string suffix = fn.substr(index2);
        string frameNumberString = fn.substr(index+1, index2-index-1);
        istringstream iss(frameNumberString);
        int frameNumber = 0;
        iss >> frameNumber;
        rectangle(frame, cv::Point(10, 2), cv::Point(100,20),
                  cv::Scalar(255,255,255), -1);
        putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
                FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0,0,0));
        //show the current frame and the fg masks
        imshow("Frame", frame);
        imshow("FG Mask MOG 2", fgMaskMOG2);
        //get the input from the keyboard
        keyboard = waitKey( 30 );
        //search for the next image in the sequence
        ostringstream oss;
        oss << (frameNumber + 1);
        string nextFrameNumberString = oss.str();
        string nextFrameFilename = prefix + nextFrameNumberString + suffix;
        //read the next frame
        frame = imread(nextFrameFilename);
        if(frame.empty()){
            //error in opening the next image in the sequence
            cerr << "Unable to open image frame: " << nextFrameFilename << endl;
            exit(EXIT_FAILURE);
        }
        //update the path of the current frame
        fn.assign(nextFrameFilename);
    }
}

Explanation

We discuss the main parts of the above code:

  1. First, two Mat objects are allocated to store the current frame and the foreground mask obtained by using the MOG2 algorithm.
    Mat frame; //current frame
    Mat fgMaskMOG2; //fg mask generated by MOG2 method
  2. A cv::BackgroundSubtractor object will be used to generate the foreground mask. In this example, default parameters are used, but it is also possible to declare specific parameters in the create function (see the sketch after this list).
    Ptr<BackgroundSubtractor> pMOG2; //MOG2 Background subtractor
    ...
    //create Background Subtractor objects
    pMOG2 = createBackgroundSubtractorMOG2(); //MOG2 approach
  3. The command line arguments are analysed. The user can choose between two options:
    • video files (by choosing the option -vid);
    • image sequences (by choosing the option -img).
      if(strcmp(argv[1], "-vid") == 0) {
      //input data coming from a video
      processVideo(argv[2]);
      }
      else if(strcmp(argv[1], "-img") == 0) {
      //input data coming from a sequence of images
      processImages(argv[2]);
      }
  4. Suppose you want to process a video file. The video is read until it ends or until the user presses 'q' or 'ESC'.
    while( (char)keyboard != 'q' && (char)keyboard != 27 ){
    //read the current frame
    if(!capture.read(frame)) {
    cerr << "Unable to read next frame." << endl;
    cerr << "Exiting..." << endl;
    exit(EXIT_FAILURE);
    }
  5. Every frame is used both for calculating the foreground mask and for updating the background. If you want to change the learning rate used for updating the background model, it is possible to set a specific learning rate by passing a third parameter to the 'apply' method (see the sketch after this list).
    //update the background model
    pMOG2->apply(frame, fgMaskMOG2);
  6. The current frame number can be extracted from the cv::VideoCapture object and stamped in the top left corner of the current frame. A white rectangle is used to highlight the black colored frame number.
    //get the frame number and write it on the current frame
    stringstream ss;
    rectangle(frame, cv::Point(10, 2), cv::Point(100,20),
    cv::Scalar(255,255,255), -1);
    ss << capture.get(CAP_PROP_POS_FRAMES);
    string frameNumberString = ss.str();
    putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
            FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0,0,0));
  7. We are ready to show the current input frame and the results.
    //show the current frame and the fg masks
    imshow("Frame", frame);
    imshow("FG Mask MOG", fgMaskMOG);
    imshow("FG Mask MOG 2", fgMaskMOG2);
  8. The same operations listed above can be performed using a sequence of images as input. The processImages function is called and, instead of using a cv::VideoCapture object, the images are read by using cv::imread, after determining the correct path of the next frame to read.
    //read the first file of the sequence
    frame = imread(firstFrameFilename);
    if(frame.empty()){
        //error in opening the first image
        cerr << "Unable to open first image frame: " << firstFrameFilename << endl;
        exit(EXIT_FAILURE);
    }
    ...
    //search for the next image in the sequence
    ostringstream oss;
    oss << (frameNumber + 1);
    string nextFrameNumberString = oss.str();
    string nextFrameFilename = prefix + nextFrameNumberString + suffix;
    //read the next frame
    frame = imread(nextFrameFilename);
    if(frame.empty()){
        //error in opening the next image in the sequence
        cerr << "Unable to open image frame: " << nextFrameFilename << endl;
        exit(EXIT_FAILURE);
    }
    //update the path of the current frame
    fn.assign(nextFrameFilename);
    Note that this example works only on image sequences in which the filename format is <n>.png, where n is the frame number (e.g., 7.png).
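
Items 2 and 5 above mention two settings that the sample leaves at their defaults: the parameters of the create function and the learning rate passed to the 'apply' method. The following sketch shows both, reusing the frame and fgMaskMOG2 variables of the sample above; the concrete values are illustrative only.

//create the Background Subtractor with explicit parameters instead of the defaults
Ptr<BackgroundSubtractor> pMOG2 =
    createBackgroundSubtractorMOG2(500,   //history: number of past frames that affect the model
                                   16.0,  //varThreshold: squared Mahalanobis distance threshold
                                   true); //detectShadows: mark shadow pixels (value 127) in the mask
//the optional third parameter of apply() is the learning rate: 0 freezes the
//background model, 1 reinitializes it from the last frame, values in between
//control how fast it adapts, and a negative value (the default) lets the
//algorithm choose the rate automatically
pMOG2->apply(frame, fgMaskMOG2, 0.005);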

Results

Evaluation

To quantitatively evaluate the results obtained, we need to:

  1. Save the output images;
  2. Compare them with the ground truth data.

In order to save the output images, we can use cv::imwrite. Adding the following code allows for saving the foreground masks.

string imageToSave = "output_MOG2_" + frameNumberString + ".png";
bool saved = imwrite(imageToSave, fgMaskMOG2);
if(!saved) {
cerr << "Unable to save " << imageToSave << endl;
}

Once we have collected the result images, we can compare them with the ground truth data. There exist several publicly available sequences for background subtraction that come with ground truth data. If you decide to use the Background Models Challenge (BMC), the result images can be used as input for the BMC Wizard, which computes several measures of the accuracy of the results.
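
The BMC Wizard performs this comparison for you. As a rough per-frame check, precision and recall can also be computed directly with OpenCV, as in the sketch below; the file names are placeholders, and both masks are assumed to be strictly binary 8-bit images (with detectShadows enabled, the shadow value 127 would first have to be thresholded away).

//compare one saved foreground mask with the corresponding ground truth mask
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>
int main()
{
    cv::Mat result = cv::imread("output_MOG2_7.png", cv::IMREAD_GRAYSCALE);
    cv::Mat truth  = cv::imread("groundtruth_7.png", cv::IMREAD_GRAYSCALE);
    if(result.empty() || truth.empty() || result.size() != truth.size())
        return 1;
    //count true positives, false positives and false negatives pixel by pixel
    cv::Mat notResult, notTruth, tpMask, fpMask, fnMask;
    cv::bitwise_not(result, notResult);
    cv::bitwise_not(truth, notTruth);
    cv::bitwise_and(result, truth, tpMask);     //foreground in both masks
    cv::bitwise_and(result, notTruth, fpMask);  //detected, but background in the truth
    cv::bitwise_and(notResult, truth, fnMask);  //missed foreground
    double tp = cv::countNonZero(tpMask);
    double fp = cv::countNonZero(fpMask);
    double fn = cv::countNonZero(fnMask);
    //precision and recall for this frame (guarding against empty masks)
    double precision = (tp + fp > 0) ? tp / (tp + fp) : 0;
    double recall    = (tp + fn > 0) ? tp / (tp + fn) : 0;
    std::cout << "precision: " << precision << "  recall: " << recall << std::endl;
    return 0;
}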
