#include <algorithm>
#include <iostream>
#include <sstream>

#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/core.hpp>
#include <opencv2/gapi/infer.hpp>
#include <opencv2/gapi/infer/parsers.hpp>
#include <opencv2/gapi/render.hpp>
#include <opencv2/gapi/cpu/gcpukernel.hpp>
#include <opencv2/gapi/streaming/format.hpp>
// OAK backend headers: assume an OpenCV 4.x build with G-API OAK support enabled.
#include <opencv2/gapi/oak/oak.hpp>
#include <opencv2/gapi/oak/infer.hpp>
const std::string keys =
    "{ h help | | Print this help message }"
    "{ detector | | Path to compiled .blob face detector model }"
    "{ duration | 100 | Number of frames to pull from camera and run inference on }";
namespace custom {

// Face detector network: a single-input/single-output model; the tag string is
// an arbitrary unique identifier for this network type.
G_API_NET(FaceDetector, <cv::GMat(cv::GMat)>, "sample.custom.face-detector");

using GDetections = cv::GArray<cv::Rect>;
using GPrims      = cv::GArray<cv::gapi::wip::draw::Prim>;

// Graph operation which turns detected rectangles into rendering primitives.
G_API_OP(BBoxes, <GPrims(GDetections)>, "sample.custom.b-boxes") {
    static cv::GArrayDesc outMeta(const cv::GArrayDesc &) {
        return cv::empty_array_desc();
    }
};
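
// CPU (OpenCV) implementation of the BBoxes operation: it runs on the host and
// converts each detected cv::Rect into a G-API drawing primitive.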
GAPI_OCV_KERNEL(OCVBBoxes, BBoxes) {
    static void run(const std::vector<cv::Rect> &in_face_rcs,
                    std::vector<cv::gapi::wip::draw::Prim> &out_prims) {
        out_prims.clear();
        // Wrap a rectangle into a draw::Rect primitive of the given color.
        const auto cvt = [](const cv::Rect &rc, const cv::Scalar &clr) {
            return cv::gapi::wip::draw::Rect(rc, clr, 2);
        };
        for (auto &&rc : in_face_rcs) {
            out_prims.emplace_back(cvt(rc, CV_RGB(0,255,0)));
        }
    }
};
} // namespace custom
int main(int argc, char *argv[]) {
    cv::CommandLineParser cmd(argc, argv, keys);
    if (cmd.has("help")) {
        cmd.printMessage();
        return 0;
    }
    const auto det_name = cmd.get<std::string>("detector");
    const auto duration = cmd.get<int>("duration");

    if (det_name.empty()) {
        std::cerr << "FATAL: path to the detection model is not provided for the sample. "
                  << "Please specify it with the --detector option."
                  << std::endl;
        return 1;
    }
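
    // Prepare the kernel and network packages for graph compilation. NOTE: the
    // cv::gapi::oak::Params<>, cv::gapi::oak::copy() and cv::gapi::oak::kernels()
    // helpers below assume the G-API OAK backend API of OpenCV 4.x (WITH_OAK builds).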
    auto detector = cv::gapi::oak::Params<custom::FaceDetector>(det_name);
    auto networks = cv::gapi::networks(detector);
    auto kernels  = cv::gapi::combine(cv::gapi::kernels<custom::OCVBBoxes>(),
                                      cv::gapi::oak::kernels());
    auto args     = cv::compile_args(kernels, networks);

    // Build the graph: copy the camera frame to the host, run the detector,
    // parse the SSD output into rectangles and render them on the frame.
    cv::GFrame in;
    cv::GFrame copy = cv::gapi::oak::copy(in);
    cv::GMat blob = cv::gapi::infer<custom::FaceDetector>(copy);
    cv::GArray<cv::Rect> rcs = cv::gapi::parseSSD(blob,
                                                  cv::gapi::streaming::size(copy),
                                                  0.5f,   // confidence threshold
                                                  true,   // align boxes to square
                                                  false); // keep out-of-bounds boxes
    cv::GFrame rendered = cv::gapi::wip::draw::renderFrame(copy, custom::BBoxes::on(rcs));
    cv::GMat out = cv::gapi::streaming::BGR(rendered);

    auto pipeline = cv::GComputation(cv::GIn(in), cv::GOut(out, rcs))
                        .compileStreaming(std::move(args));
    // The OAK color camera is the graph's input source.
    pipeline.setSource(cv::gapi::wip::make_src<cv::gapi::oak::ColorCamera>());
    pipeline.start();
    cv::Mat out_mat;
    std::vector<cv::Rect> out_dets;
    int frames = 0;
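    // pull() blocks until the outputs for the next frame are ready and returns
    // false once the stream is over or the pipeline is stopped.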
    while (pipeline.pull(cv::gout(out_mat, out_dets))) {
        std::string name = "oak_infer_frame_" + std::to_string(frames) + ".png";
        cv::imwrite(name, out_mat); // dump every rendered frame to disk
        if (!out_dets.empty()) {
            std::cout << "Got " << out_dets.size() << " detections on frame #" << frames << std::endl;
        }
        ++frames;
        if (frames == duration) {
            pipeline.stop();
            break;
        }
    }
std::cout << "Pipeline finished. Processed " << frames << " frames" << std::endl;
return 0;
}