1. Crop a template image (size: 164x164) and save it as faceSample.png.
2. Generate positive samples from the template:
$ opencv_createsamples -img faceSample.png -vec faceSample.vec -num 10000
3. Have a look at the generated samples:
$ opencv_createsamples -show -vec faceSample.vec
4. Run the training:
$ opencv_haartraining -data result -vec faceSample.vec -bg bg.txt
5. Test the trained cascade:
$ ./haarObjectDetection result.xml group.jpg
Here is group.jpg, downloaded from a Google image search (the picture itself is not shown here, for privacy).
Actually, the template image is cropped from group.jpg; a sketch of that cropping step follows below.
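This is only a rough sketch of how step 1 could be done programmatically; the ROI coordinates are placeholders (the actual face position in group.jpg is not known here) and would need to be adjusted.
----------------------------------------------------------------------------
// cropTemplate.cpp -- sketch only: the ROI below is a made-up example
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;

int main()
{
  Mat group = imread( "group.jpg" );
  if( group.empty() ) return -1;
  Rect roi( 100, 80, 200, 200 );               // hypothetical face location in group.jpg
  Mat face;
  resize( group(roi), face, Size(164, 164) );  // template size used in step 1
  imwrite( "faceSample.png", face );
  return 0;
}
----------------------------------------------------------------------------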
* haarObjectDetection.cpp
----------------------------------------------------------------------------
// modified version of objectDetection.cpp
/**
* @file objectDetection.cpp
* @author A. Huaman ( based in the classic facedetect.cpp in samples/c )
* @brief A simplified version of facedetect.cpp, show how to load a cascade classifier and how to find objects (Face + eyes) in a video stream
*/
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
/** Function Headers */
void detectAndDisplay( Mat frame );
/** Global variables */
//-- Note: in this modified version the cascade file is not hard-coded; it is given on the command line (argv[1])
CascadeClassifier face_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);
int main( int argc, char *argv[] )
{
  Mat frame;

  if( argc != 3 ) {
    cerr << "usage: " << argv[0] << " haar_cascade.xml testimage.jpg" << endl;
    return 0;
  }

  //-- 1. Load the cascade given on the command line
  if( !face_cascade.load( argv[1] ) ){ printf("--(!)Error loading\n"); return -1; };

  //-- 2. Read the test image
  frame = imread( argv[2] );

  //-- 3. Apply the classifier to the image
  if( !frame.empty() )
  { detectAndDisplay( frame ); }
  else
  { printf(" --(!) Could not read the test image!\n"); return 0; }

  waitKey();
  return 0;
}
/**
* @function detectAndDisplay
*/
void detectAndDisplay( Mat frame )
{
  std::vector<Rect> faces;
  Mat frame_gray;

  cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
  equalizeHist( frame_gray, frame_gray );

  //-- Detect faces with the trained cascade
  face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );

  //-- Draw an ellipse around each detected face
  for( size_t i = 0; i < faces.size(); i++ )
  {
    Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
    ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 2, 8, 0 );
  }

  //-- Show what you got
  imshow( window_name, frame );
}
----------------------------------------------------------------------------
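To build the test program used in step 5, a typical compile line is shown below; the pkg-config package name may be opencv or opencv4 depending on the installed version.
$ g++ haarObjectDetection.cpp -o haarObjectDetection `pkg-config --cflags --libs opencv`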
http://docs.opencv.org/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.html
http://docs.opencv.org/doc/user_guide/ug_traincascade.html
opencv_traincascade
supports both Haar [Viola2001] and LBP [Liao2007] (Local Binary Patterns) features.
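As an illustrative (untuned) example, a training run on the files prepared above might look like the following; the sample counts and number of stages are placeholders, and -w/-h must match the sample size used by opencv_createsamples (24x24 by default).
$ opencv_traincascade -data result -vec faceSample.vec -bg bg.txt -numPos 900 -numNeg 500 -numStages 20 -featureType LBP -w 24 -h 24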
opencv_createsamples
is used to prepare a training dataset of positive and test samples.
The output is a file with the *.vec extension; it is a binary format that contains the images.
-- Training Data Preparation
The set of negative samples must be prepared manually, whereas the set of positive samples is created using the opencv_createsamples utility.
-- Negative Samples
Make a text file listing the negative image file names, one per line:
$ cat bg.txt
img/bg1.png
img/bg2.png
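One way to generate bg.txt (assuming the negative images live under img/) is a simple shell listing:
$ find img -name '*.png' > bg.txt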
-- Positive Samples
Positive samples are created by the opencv_createsamples utility.
They may be created from a single image containing the object (as in step 2 above)
or from a collection of previously marked-up images; an example annotation file is sketched below.
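For the marked-up collection case, the annotation file passed with -info lists, for each image, the number of objects followed by their bounding boxes (x y width height). The file names and coordinates below are only an illustration.
$ cat info.dat
img/img1.jpg 1 140 100 45 45
img/img2.jpg 2 100 200 50 50 50 30 25 25
$ opencv_createsamples -info info.dat -vec samples.vec -num 1000 -w 24 -h 24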