#include <opencv2/cnn_3dobj.hpp>      /* cnn_3dobj contrib module: descriptorExtractor */
#include <opencv2/features2d.hpp>     /* BFMatcher, DMatch */
#include <opencv2/imgcodecs.hpp>      /* cv::imread */
#include <dirent.h>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <iostream>
#include <iomanip>
#include <vector>
#include <algorithm>
using namespace cv;
using namespace std;
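/**
 * @function listDir
 * @brief Collect the names of all regular files under path into files;
 * if r is true, also descend one level into subdirectories.
 */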
static void listDir(const char *path, std::vector<String>& files, bool r)
{
DIR *pDir;
struct dirent *ent;
char childpath[512];
    pDir = opendir(path);
    if (pDir == NULL)
    {
        cerr << "failed to open directory: " << path << endl;
        return;
    }
    memset(childpath, 0, sizeof(childpath));
    while ((ent = readdir(pDir)) != NULL)
{
if (ent->d_type & DT_DIR)
{
if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0 || strcmp(ent->d_name, ".DS_Store") == 0)
{
continue;
}
if (r)
{
                snprintf(childpath, sizeof(childpath), "%s/%s", path, ent->d_name);
                listDir(childpath, files, false);
}
}
else
{
if (strcmp(ent->d_name, ".DS_Store") != 0)
files.push_back(ent->d_name);
}
}
    closedir(pDir);
    sort(files.begin(), files.end());
}
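/**
 * @function featureWrite
 * @brief Write the feature matrix row by row into a binary file;
 * returns 1 on success and 0 if the file cannot be opened.
 */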
static int featureWrite(const Mat &features, const String &fname)
{
ofstream ouF;
    ouF.open(fname.c_str(), std::ofstream::binary);
if (!ouF)
{
cerr << "failed to open the file : " << fname << endl;
return 0;
}
    for (int r = 0; r < features.rows; r++)
    {
        ouF.write(reinterpret_cast<const char*>(features.ptr(r)), features.cols * features.elemSize());
    }
ouF.close();
return 1;
}
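/**
 * @function main
 * @brief Extract CNN features from a gallery of images and either dump them to
 * disk (gallery_out) or classify a target image by k-nearest-neighbour matching.
 *
 * Example invocation (a sketch; the executable name classify_demo is an assumption,
 * and all flags fall back to the defaults declared in keys below):
 *   ./classify_demo --src_dir=../data/images_all/ --target_img=../data/images_all/4_78.png --num_candidate=15
 */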
int main(int argc, char** argv)
{
const String keys =
"{help | | This sample will extract features from reference images and target image for classification. You can add a mean_file if there little variance in data such as human faces, otherwise it is not so useful}" "{src_dir | ../data/images_all/ | Source direction of the images ready for being used for extract feature as gallery.}"
"{caffemodel | ../../testdata/cv/3d_triplet_iter_30000.caffemodel | caffe model for feature exrtaction.}"
"{network_forIMG | ../../testdata/cv/3d_triplet_testIMG.prototxt | Network definition file used for extracting feature from a single image and making a classification}"
"{mean_file | no | The mean file generated by Caffe from all gallery images, this could be used for mean value substraction from all images. If you want to use the mean file, you can set this as ../data/images_mean/triplet_mean.binaryproto.}"
"{target_img | ../data/images_all/4_78.png | Path of image waiting to be classified.}"
"{feature_blob | feat | Name of layer which will represent as the feature, in this network, ip1 or feat is well.}"
"{num_candidate | 15 | Number of candidates in gallery as the prediction result.}"
"{device | CPU | Device type: CPU or GPU}"
"{dev_id | 0 | Device id}"
"{gallery_out | 0 | Option on output binary features on gallery images}";
    cv::CommandLineParser parser(argc, argv, keys);
    parser.about("Feature extraction and classification");
if (parser.has("help"))
{
parser.printMessage();
return 0;
}
    String src_dir = parser.get<String>("src_dir");
    String caffemodel = parser.get<String>("caffemodel");
    String network_forIMG = parser.get<String>("network_forIMG");
    String mean_file = parser.get<String>("mean_file");
    String target_img = parser.get<String>("target_img");
    String feature_blob = parser.get<String>("feature_blob");
    int num_candidate = parser.get<int>("num_candidate");
    String device = parser.get<String>("device");
    int dev_id = parser.get<int>("dev_id");
    int gallery_out = parser.get<int>("gallery_out");
    /* Initialize the feature extractor from the cnn_3dobj module on the requested device. */
    cv::cnn_3dobj::descriptorExtractor descriptor(device, dev_id);
    std::cout << "Using " << descriptor.getDeviceType() << std::endl;
if (strcmp(mean_file.c_str(), "no") == 0)
descriptor.loadNet(network_forIMG, caffemodel);
else
descriptor.loadNet(network_forIMG, caffemodel, mean_file);
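    /* Collect the file names of all gallery images under src_dir. */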
std::vector<String> name_gallery;
    listDir(src_dir.c_str(), name_gallery, false);
if (gallery_out)
{
ofstream namelist_out("gallelist.txt");
for (unsigned int i = 0; i < name_gallery.size(); i++)
namelist_out << name_gallery.at(i) << endl;
}
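    /* Prepend the source directory so each entry is a full image path. */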
for (unsigned int i = 0; i < name_gallery.size(); i++)
{
name_gallery[i] = src_dir + name_gallery[i];
}
    std::vector<cv::Mat> img_gallery;
    cv::Mat feature_reference;
for (unsigned int i = 0; i < name_gallery.size(); i++)
{
        img_gallery.push_back(cv::imread(name_gallery[i]));
}
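    /* Extract features for all gallery images; the result has one row per image. */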
descriptor.extract(img_gallery, feature_reference, feature_blob);
if (gallery_out)
{
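        /* Dump mode: print the gallery features and save them instead of classifying. */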
std::cout << std::endl << "---------- Features of gallery images ----------" << std::endl;
        for (int i = 0; i < feature_reference.rows; i++)
            std::cout << feature_reference.row(i) << endl;
std::cout << std::endl << "---------- Saving features of gallery images into feature.bin ----------" << std::endl;
featureWrite(feature_reference, "feature.bin");
}
else
{
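        /* Classification mode: match the target image's feature against the gallery. */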
std::cout << std::endl << "---------- Prediction for " << target_img << " ----------" << std::endl;
std::cout << std::endl << "---------- Features of gallery images ----------" << std::endl;
std::vector<std::pair<String, float> > prediction;
        for (int i = 0; i < feature_reference.rows; i++)
            std::cout << feature_reference.row(i) << endl;
        /* Read the target image and extract its feature from the same blob. */
        cv::Mat img = cv::imread(target_img);
        cv::Mat feature_test;
        descriptor.extract(img, feature_test, feature_blob);
        /* Brute-force matcher using L2 distance between feature vectors. */
        cv::BFMatcher matcher(cv::NORM_L2);
        std::vector<std::vector<cv::DMatch> > matches;
        matcher.knnMatch(feature_test, feature_reference, matches, num_candidate);
std::cout << std::endl << "---------- Features of target image: " << target_img << "----------" << endl << feature_test << std::endl;
std::cout << std::endl << "---------- Prediction result(Distance - File Name in Gallery) ----------" << std::endl;
for (size_t i = 0; i < matches[0].size(); ++i)
{
std::cout << i << " - " << std::fixed << std::setprecision(2) << name_gallery[matches[0][i].trainIdx] << " - \"" << matches[0][i].distance << "\"" << std::endl;
}
}
return 0;
}