To perform the tracking we need a video and the object's position on the first frame.
To run the code you must specify an input (a camera id or a video file name). Then select a bounding box with the mouse, and press any key to start tracking.
#include <opencv2/opencv.hpp>

#include <iomanip>
#include <iostream>
#include <sstream>
#include <vector>

#include "stats.h"
#include "utils.h"

using namespace std;
using namespace cv;
// Tuning parameters for the feature-based tracker.
const double akaze_thresh = 3e-4;       // AKAZE detection threshold
const double ransac_thresh = 2.5;       // RANSAC max reprojection error (pixels); was 2.5f — use a double literal for a double constant
const double nn_match_ratio = 0.8;      // nearest-neighbour ratio test; was 0.8f, which widens to 0.8000000119, not 0.8
const int bb_min_inliers = 100;         // minimal inlier count required to draw the bounding box
const int stats_update_period = 10;     // on-screen statistics refresh period, in frames
namespace example {
{
public:
        detector(_detector),
        matcher(_matcher)
    {}
    void setFirstFrame(
const Mat frame, vector<Point2f> bb, 
string title, Stats& stats);
     Mat process(
const Mat frame, Stats& stats);
         return detector;
    }
protected:
    Mat first_frame, first_desc;
     vector<KeyPoint> first_kp;
    vector<Point2f> object_bb;
};
void Tracker::setFirstFrame(
const Mat frame, vector<Point2f> bb, 
string title, Stats& stats)
 {
    const Point* ptContain = { &ptMask[0] };
     int iSize = static_cast<int>(bb.size());
    for (size_t i=0; i<bb.size(); i++) {
        ptMask[
i].
x = 
static_cast<int>(bb[
i].x);
        ptMask[
i].
y = 
static_cast<int>(bb[
i].y);
    }
    first_frame = frame.
clone();
    stats.keypoints = (int)first_kp.size();
    drawBoundingBox(first_frame, bb);
    object_bb = bb;
    delete[] ptMask;
}
// Processes one frame: detects keypoints, matches them to the reference via
// 2-NN + ratio test, fits a RANSAC homography, and returns a drawMatches
// visualization. On failure (too few matches or empty homography), returns
// the reference and current frames side by side with zeroed inlier stats.
Mat Tracker::process(const Mat frame, Stats& stats)
{
    // Detect keypoints and compute descriptors on the incoming frame.
    vector<KeyPoint> kp;
    Mat desc;
    detector->detectAndCompute(frame, noArray(), kp, desc);
    stats.keypoints = (int)kp.size();

    // 2-NN matching against the reference descriptors, filtered by the
    // nearest-neighbour ratio test.
    vector< vector<DMatch> > matches;
    vector<KeyPoint> matched1, matched2;
    matcher->knnMatch(first_desc, desc, matches, 2);
    for(unsigned i = 0; i < matches.size(); i++) {
        if(matches[i][0].distance < nn_match_ratio * matches[i][1].distance) {
            matched1.push_back(first_kp[matches[i][0].queryIdx]);
            matched2.push_back(      kp[matches[i][0].trainIdx]);
        }
    }
    stats.matches = (int)matched1.size();

    Mat inlier_mask, homography;
    vector<KeyPoint> inliers1, inliers2;
    vector<DMatch> inlier_matches;
    if(matched1.size() >= 4) {  // a homography needs at least 4 correspondences
        homography = findHomography(Points(matched1), Points(matched2),
                                    RANSAC, ransac_thresh, inlier_mask);
    }

    if(matched1.size() < 4 || homography.empty()) {
        // Tracking lost on this frame: show reference and frame side by side.
        Mat res;
        hconcat(first_frame, frame, res);
        stats.inliers = 0;
        stats.ratio = 0;
        return res;
    }

    // Keep only the RANSAC inliers; re-index matches to the compacted lists.
    for(unsigned i = 0; i < matched1.size(); i++) {
        if(inlier_mask.at<uchar>(i)) {
            int new_i = static_cast<int>(inliers1.size());
            inliers1.push_back(matched1[i]);
            inliers2.push_back(matched2[i]);
            inlier_matches.push_back(DMatch(new_i, new_i, 0));
        }
    }
    stats.inliers = (int)inliers1.size();
    stats.ratio = stats.inliers * 1.0 / stats.matches;

    // Project the reference bounding box into the current frame and draw it
    // only when tracking is confident enough.
    vector<Point2f> new_bb;
    perspectiveTransform(object_bb, new_bb, homography);
    Mat frame_with_bb = frame.clone();
    if(stats.inliers >= bb_min_inliers) {
        drawBoundingBox(frame_with_bb, new_bb);
    }

    Mat res;
    drawMatches(first_frame, inliers1, frame_with_bb, inliers2,
                inlier_matches, res,
                Scalar(255, 0, 0), Scalar(255, 0, 0));
    return res;
}
}
int main(int argc, char **argv)
{
    if(argc < 2) {
        cerr << "Usage: " << endl
             << "akaze_track input_path" << endl
             << "  (input_path can be a camera id, like 0,1,2 or a video filename)" << endl;
        return 1;
    }
    std::string video_name = argv[1];
    std::stringstream ssFormat;
    ssFormat << atoi(argv[1]);
    if (video_name.compare(ssFormat.str())==0) {    
        video_in.
open(atoi(argv[1]));
    }
    else {
        video_in.
open(video_name);
    }
        cerr << "Couldn't open " << argv[1] << endl;
        return 1;
    }
    Stats stats, akaze_stats, orb_stats;
    example::Tracker akaze_tracker(akaze, matcher);
    example::Tracker orb_tracker(orb, matcher);
    video_in >> frame;
    cout << "Please select a bounding box, and press any key to continue." << endl;
    vector<Point2f> bb;
    bb.push_back(
cv::Point2f(static_cast<float>(uBox.
x), static_cast<float>(uBox.
y)));
    bb.push_back(
cv::Point2f(static_cast<float>(uBox.
x+uBox.
width), static_cast<float>(uBox.
y)));
    akaze_tracker.setFirstFrame(frame, bb, "AKAZE", stats);
    orb_tracker.setFirstFrame(frame, bb, "ORB", stats);
    Stats akaze_draw_stats, orb_draw_stats;
    Mat akaze_res, orb_res, res_frame;
     int i = 0;
    for(;;) {
        i++;
        bool update_stats = (i % stats_update_period == 0);
        video_in >> frame;
        
        if(frame.empty()) break;
        akaze_res = akaze_tracker.process(frame, stats);
        akaze_stats += stats;
        if(update_stats) {
            akaze_draw_stats = stats;
        }
        orb_res = orb_tracker.process(frame, stats);
        orb_stats += stats;
        if(update_stats) {
            orb_draw_stats = stats;
        }
        drawStatistics(akaze_res, akaze_draw_stats);
        drawStatistics(orb_res, orb_draw_stats);
        vconcat(akaze_res, orb_res, res_frame);
     }
    akaze_stats /= i - 1;
    orb_stats /= i - 1;
    printStatistics("AKAZE", akaze_stats);
    printStatistics("ORB", orb_stats);
    return 0;
}
The Tracker class above implements the algorithm described using the given feature detector and descriptor matcher.