#include <dirent.h>
#include <stdlib.h>
#include <time.h>

#include <algorithm>
#include <cstdio>
#include <cstring>
#include <iostream>
#include <vector>
/**
 * @brief Collect the file names inside @p path into @p files, sorted by name.
 *
 * Hidden macOS ".DS_Store" entries are skipped. When @p r is true, entries of
 * first-level subdirectories are collected as well (the recursion always passes
 * false, so it never descends deeper than one level).
 *
 * @param path  Directory to scan.
 * @param files Output vector of file names (appended to, then sorted).
 * @param r     Recurse one level into subdirectories.
 */
static void listDir(const char *path, std::vector<String>& files, bool r)
{
    DIR *pDir = opendir(path);
    // Fix: the original never checked opendir() and would pass a NULL stream
    // to readdir() on a missing/unreadable directory.
    if (pDir == NULL)
        return;
    struct dirent *ent;
    char childpath[512];
    while ((ent = readdir(pDir)) != NULL)
    {
        if (ent->d_type & DT_DIR)
        {
            // Skip self/parent links and macOS metadata directories.
            if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0 || strcmp(ent->d_name, ".DS_Store") == 0)
                continue;
            if (r)
            {
                // Fix: snprintf bounds the write; sprintf could overflow
                // childpath for long path/name combinations.
                snprintf(childpath, sizeof(childpath), "%s/%s", path, ent->d_name);
                listDir(childpath, files, false);
            }
        }
        else if (strcmp(ent->d_name, ".DS_Store") != 0)
        {
            files.push_back(ent->d_name);
        }
    }
    // Fix: the directory stream was leaked on every call.
    closedir(pDir);
    sort(files.begin(), files.end());
}
// Entry point: renders a .ply model from camera positions sampled on a sphere
// and writes one screenshot (plus label line, and optionally a binary record)
// per pose, as training data for a CNN with triplet loss.
int main(int argc, char *argv[])
{
// Command-line option table consumed by cv::CommandLineParser.
// NOTE(review): `parser` is used below but its declaration (presumably
// `cv::CommandLineParser parser(argc, argv, keys);`) is not visible in this
// chunk -- lines appear to be missing here; confirm against the upstream file.
const String keys =
"{help | | demo :$ ./sphereview_test -ite_depth=2 -plymodel=../data/3Dmodel/ape.ply -imagedir=../data/images_all/ -labeldir=../data/label_all.txt -num_class=6 -label_class=0, then press 'q' to run the demo for images generation when you see the gray background and a coordinate.}" "{ite_depth | 3 | Iteration of sphere generation.}"
"{plymodel | ../data/3Dmodel/ape.ply | Path of the '.ply' file for image rendering. }"
"{imagedir | ../data/images_all/ | Path of the generated images for one particular .ply model. }"
"{labeldir | ../data/label_all.txt | Path of the generated images for one particular .ply model. }"
"{bakgrdir | | Path of the backgroud images sets. }"
"{cam_head_x | 0 | Head of the camera. }"
"{cam_head_y | 0 | Head of the camera. }"
"{cam_head_z | -1 | Head of the camera. }"
"{semisphere | 1 | Camera only has positions on half of the whole sphere. }"
"{z_range | 0.6 | Maximum camera position on z axis. }"
"{center_gen | 0 | Find center from all points. }"
"{image_size | 128 | Size of captured images. }"
"{label_class | | Class label of current .ply model. }"
"{label_item | | Item label of current .ply model. }"
"{rgb_use | 0 | Use RGB image or grayscale. }"
"{num_class | 6 | Total number of classes of models. }"
"{binary_out | 0 | Produce binaryfiles for images and label. }"
"{view_region | 0 | Take a special view of front or back angle}";
parser.about("Generating training data for CNN with triplet loss");
// Print usage and exit when -help is given.
if (parser.has("help"))
{
parser.printMessage();
return 0;
}
// Pull every option into a local; see the keys table above for meanings.
int ite_depth = parser.get<int>("ite_depth");
int label_class = parser.get<int>("label_class");
int label_item = parser.get<int>("label_item");
float cam_head_x = parser.get<float>("cam_head_x");
float cam_head_y = parser.get<float>("cam_head_y");
float cam_head_z = parser.get<float>("cam_head_z");
int semisphere = parser.get<int>("semisphere");
float z_range = parser.get<float>("z_range");
int center_gen = parser.get<int>("center_gen");
int image_size = parser.get<int>("image_size");
int rgb_use = parser.get<int>("rgb_use");
int num_class = parser.get<int>("num_class");
int binary_out = parser.get<int>("binary_out");
int view_region = parser.get<int>("view_region");
// Camera-to-object distance, background distance, and the y threshold used to
// carve out the front/back view regions.
// NOTE(review): these stay uninitialized for any view_region other than
// 0, 1 or 2, yet obj_dist/bg_dist are read unconditionally later -- that read
// would be undefined behavior; confirm view_region is validated upstream.
double obj_dist, bg_dist, y_range;
if (view_region == 1 || view_region == 2)
{
// Special front/back views: larger distance for class 12, denser sphere.
if (label_class == 12)
obj_dist = 340;
else
obj_dist = 250;
ite_depth = ite_depth + 1;
bg_dist = 700;
y_range = 0.85;
}
else if (view_region == 0)
{
// Default all-around view.
obj_dist = 370;
bg_dist = 400;
}
// These classes get one extra sphere-subdivision iteration (more poses).
if (label_class == 5 || label_class == 10 || label_class == 11 || label_class == 12)
ite_depth = ite_depth + 1;
// Filter the full set of candidate camera positions down to the requested
// region of the view sphere.
std::vector<cv::Point3d> campos;
std::vector<cv::Point3d> campos_temp = ViewSphere.CameraPos;
if (semisphere == 1)
{
// Upper half-sphere only (z >= 0, capped by z_range).
if (view_region == 1)
{
// Front region: y below -y_range.
for (int pose = 0; pose < static_cast<int>(campos_temp.size()); pose++)
{
if (campos_temp.at(pose).z >= 0 && campos_temp.at(pose).z < z_range && campos_temp.at(pose).y < -y_range)
campos.push_back(campos_temp.at(pose));
}
}
else if (view_region == 2)
{
// Back region: y above y_range.
for (int pose = 0; pose < static_cast<int>(campos_temp.size()); pose++)
{
if (campos_temp.at(pose).z >= 0 && campos_temp.at(pose).z < z_range && campos_temp.at(pose).y > y_range)
campos.push_back(campos_temp.at(pose));
}
}
else
{
// Whole half-sphere; class 10 additionally restricted to y < -0.4.
if (label_class == 10)
{
for (int pose = 0; pose < static_cast<int>(campos_temp.size()); pose++)
{
if (campos_temp.at(pose).z >= 0 && campos_temp.at(pose).z < z_range && campos_temp.at(pose).y < -0.4)
campos.push_back(campos_temp.at(pose));
}
}
else
{
for (int pose = 0; pose < static_cast<int>(campos_temp.size()); pose++)
{
if (campos_temp.at(pose).z >= 0 && campos_temp.at(pose).z < z_range)
campos.push_back(campos_temp.at(pose));
}
}
}
}
else
{
// Full sphere mode: keep poses in a band around the equator.
if (view_region == 1)
{
for (int pose = 0; pose < static_cast<int>(campos_temp.size()); pose++)
{
if (campos_temp.at(pose).z < 0.2 && campos_temp.at(pose).z > -0.2 && campos_temp.at(pose).y < -y_range)
campos.push_back(campos_temp.at(pose));
}
}
else if (view_region == 2)
{
for (int pose = 0; pose < static_cast<int>(campos_temp.size()); pose++)
{
if (campos_temp.at(pose).z < 0.2 && campos_temp.at(pose).z > -0.2 && campos_temp.at(pose).y > y_range)
campos.push_back(campos_temp.at(pose));
}
}
else
{
// Asymmetric band: allows slightly more poses below the equator.
for (int pose = 0; pose < static_cast<int>(campos_temp.size()); pose++)
{
if (campos_temp.at(pose).z < 0.2 && campos_temp.at(pose).z > -0.6)
campos.push_back(campos_temp.at(pose));
}
}
}
// Label file is opened in append mode so repeated runs accumulate lines.
std::fstream imglabel;
imglabel.open(labeldir.c_str(), fstream::app|fstream::out);
bool camera_pov = true;
// Square render window sized to the requested training-image size.
myWindow.setWindowSize(
Size(image_size,image_size));
myWindow.spinOnce();
// Optionally compute the focal point from the point cloud's center.
// NOTE(review): this `else` branch is ill-formed as written (a declaration
// cannot be the sole substatement of `else`) -- lines appear to have been
// dropped from this chunk; confirm the original else body upstream.
if (center_gen)
cam_focal_point = ViewSphere.getCenter(objmesh.
cloud);
else
const char* headerPath = "../data/header_for_";
const char* binaryPath = "../data/binary_";
if (binary_out)
{
ViewSphere.createHeader(static_cast<int>(campos.size()), image_size, image_size, headerPath);
}
// Normalize the focal point so the model sits at a fixed apparent scale.
float radius = ViewSphere.getRadius(objmesh.
cloud, cam_focal_point);
cam_focal_point = cam_focal_point/radius*100;
// Camera "up" direction taken from the cam_head_* options.
cam_y_dir.
x = cam_head_x;
cam_y_dir.
y = cam_head_y;
cam_y_dir.
z = cam_head_z;
char temp[1024];
// Optional background images: collect and prefix with their directory.
std::vector<String> name_bkg;
if (bakgrdir.size() != 0)
{
listDir(bakgrdir.c_str(), name_bkg, false);
for (unsigned int i = 0; i < name_bkg.size(); i++)
{
name_bkg.at(i) = bakgrdir + name_bkg.at(i);
}
}
size_t cnt_img;
srand((int)time(0));
// Render loop: one screenshot per camera pose.
// NOTE(review): cnt_img is incremented on every iteration with no visible
// skip/retry path, so the while condition is always satisfied after one pass;
// the retry logic it implies may have been lost in truncation.
do
{
cnt_img = 0;
for(int pose = 0; pose < static_cast<int>(campos.size()); pose++){
// Encode the pose (scaled by 100) into the file-name label fields.
int label_x, label_y, label_z;
label_x = static_cast<int>(campos.at(pose).x*100);
label_y = static_cast<int>(campos.at(pose).y*100);
label_z = static_cast<int>(campos.at(pose).z*100);
sprintf (temp,"%02i_%02i_%04i_%04i_%04i_%02i", label_class, label_item, label_x, label_y, label_z, static_cast<int>(obj_dist/100));
// NOTE(review): `temp` is formatted but never assigned to `filename`
// here (presumably `filename = temp;` was dropped); confirm upstream.
filename += ".png";
imglabel << filename << ' ' << label_class << endl;
filename = imagedir + filename;
// Shift the focal point for the special front/back view regions.
if (view_region != 0)
{
cam_focal_point.
x = cam_focal_point.
y - label_x/5;
}
// Build the camera pose for this sphere position.
Affine3f cam_pose =
viz::makeCameraPose(campos.at(pose)*obj_dist+cam_focal_point, cam_focal_point, cam_y_dir*obj_dist+cam_focal_point);
Affine3f cloud_pose_global = transform * cloud_pose;
// When not rendering from the camera's point of view, visualize the
// camera itself as a widget instead.
if (!camera_pov)
{
myWindow.showWidget("CPW", cpw, cam_pose);
myWindow.showWidget("CPW_FRUSTUM", cpw_frustum, cam_pose);
}
// Place the background plane opposite the camera, behind the model.
if (bakgrdir.size() != 0)
{
cv::viz::WImage3D background_widget(img_bg,
Size2d(image_size*4.2, image_size*4.2),
Vec3d(-campos.at(pose)*bg_dist+cam_focal_point),
Vec3d(campos.at(pose)*bg_dist-cam_focal_point),
Vec3d(0,0,-1)*bg_dist+
Vec3d(0,2*cam_focal_point.y,0));
myWindow.showWidget("bgwidget", background_widget, cloud_pose_global);
}
myWindow.showWidget("targetwidget", mesh_widget, cloud_pose_global);
if (camera_pov)
myWindow.setViewerPose(cam_pose);
// Capture the rendered frame as the training image for this pose.
myWindow.saveScreenshot(filename);
if (binary_out)
{
ViewSphere.writeBinaryfile(filename, binaryPath, headerPath,static_cast<int>(campos.size())*num_class, label_class, static_cast<int>(campos.at(pose).x*100), static_cast<int>(campos.at(pose).y*100), static_cast<int>(campos.at(pose).z*100), rgb_use);
}
cnt_img++;
}
} while (cnt_img != campos.size());
imglabel.close();
// NOTE(review): returning 1 from main conventionally signals failure;
// presumably intentional in the upstream sample, but worth confirming.
return 1;
};
Here is a collection of images created by this demo using 4 models.