Face Recognition Algorithms: EigenFace, FisherFace, LBPH

1. OpenCV (EigenFace, FisherFace, LBPH)

https://docs.opencv.org/master/db/d3a/facedetect_8cpp-example.html

/*This program demonstrates usage of the Cascade classifier class*/
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace std;
using namespace cv;
static void help()
{
    cout << "\nThis program demonstrates the use of cv::CascadeClassifier class to detect objects (Face + eyes). You can use Haar or LBP features.\n"
            "This classifier can recognize many kinds of rigid objects, once the appropriate classifier is trained.\n"
            "It's most known use is for faces.\n"
            "Usage:\n"
            "./facedetect [--cascade=<cascade_path> this is the primary trained classifier such as frontal face]\n"
               "   [--nested-cascade[=nested_cascade_path this an optional secondary classifier such as eyes]]\n"
               "   [--scale=]\n"
               "   [--try-flip]\n"
               "   [filename|camera_index]\n\n"
            "see facedetect.cmd for one call:\n"
            "./facedetect --cascade=\"../../data/haarcascades/haarcascade_frontalface_alt.xml\" --nested-cascade=\"../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml\" --scale=1.3\n\n"
            "During execution:\n\tHit any key to quit.\n"
            "\tUsing OpenCV version " << CV_VERSION << "\n" << endl;
}
void detectAndDraw( Mat& img, CascadeClassifier& cascade,
                    CascadeClassifier& nestedCascade,
                    double scale, bool tryflip );
string cascadeName;
string nestedCascadeName;
int main( int argc, const char** argv )
{
    VideoCapture capture;
    Mat frame, image;
    string inputName;
    bool tryflip;
    CascadeClassifier cascade, nestedCascade;
    double scale;
    cv::CommandLineParser parser(argc, argv,
        "{help h||}"
        "{cascade|../../data/haarcascades/haarcascade_frontalface_alt.xml|}"
        "{nested-cascade|../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml|}"
        "{scale|1|}{try-flip||}{@filename||}"
    );
    if (parser.has("help"))
    {
        help();
        return 0;
    }
    cascadeName = parser.get<string>("cascade");
    nestedCascadeName = parser.get<string>("nested-cascade");
    scale = parser.get<double>("scale");
    if (scale < 1)
        scale = 1;
    tryflip = parser.has("try-flip");
    inputName = parser.get<string>("@filename");
    if (!parser.check())
    {
        parser.printErrors();
        return 0;
    }
    if ( !nestedCascade.load( nestedCascadeName ) )
        cerr << "WARNING: Could not load classifier cascade for nested objects" << endl;
    if( !cascade.load( cascadeName ) )
    {
        cerr << "ERROR: Could not load classifier cascade" << endl;
        help();
        return -1;
    }
    if( inputName.empty() || (isdigit(inputName[0]) && inputName.size() == 1) )
    {
        int camera = inputName.empty() ? 0 : inputName[0] - '0';
        if(!capture.open(camera))
            cout << "Capture from camera #" <<  camera << " didn't work" << endl;
    }
    else if( inputName.size() )
    {
        image = imread( inputName, 1 );
        if( image.empty() )
        {
            if(!capture.open( inputName ))
                cout << "Could not read " << inputName << endl;
        }
    }
    else
    {
        image = imread( "../data/lena.jpg", 1 );
        if(image.empty()) cout << "Couldn't read ../data/lena.jpg" << endl;
    }
    if( capture.isOpened() )
    {
        cout << "Video capturing has been started ..." << endl;
        for(;;)
        {
            capture >> frame;
            if( frame.empty() )
                break;
            Mat frame1 = frame.clone();
            detectAndDraw( frame1, cascade, nestedCascade, scale, tryflip );
            char c = (char)waitKey(10);
            if( c == 27 || c == 'q' || c == 'Q' )
                break;
        }
    }
    else
    {
        cout << "Detecting face(s) in " << inputName << endl;
        if( !image.empty() )
        {
            detectAndDraw( image, cascade, nestedCascade, scale, tryflip );
            waitKey(0);
        }
        else if( !inputName.empty() )
        {
            /* assume it is a text file containing the
            list of the image filenames to be processed - one per line */
            FILE* f = fopen( inputName.c_str(), "rt" );
            if( f )
            {
                char buf[1000+1];
                while( fgets( buf, 1000, f ) )
                {
                    int len = (int)strlen(buf);
                    while( len > 0 && isspace(buf[len-1]) )
                        len--;
                    buf[len] = '\0';
                    cout << "file " << buf << endl;
                    image = imread( buf, 1 );
                    if( !image.empty() )
                    {
                        detectAndDraw( image, cascade, nestedCascade, scale, tryflip );
                        char c = (char)waitKey(0);
                        if( c == 27 || c == 'q' || c == 'Q' )
                            break;
                    }
                    else
                    {
                        cerr << "Aw snap, couldn't read image " << buf << endl;
                    }
                }
                fclose(f);
            }
        }
    }
    return 0;
}
void detectAndDraw( Mat& img, CascadeClassifier& cascade,
                    CascadeClassifier& nestedCascade,
                    double scale, bool tryflip )
{
    double t = 0;
    vector<Rect> faces, faces2;
    const static Scalar colors[] =
    {
        Scalar(255,0,0),
        Scalar(255,128,0),
        Scalar(255,255,0),
        Scalar(0,255,0),
        Scalar(0,128,255),
        Scalar(0,255,255),
        Scalar(0,0,255),
        Scalar(255,0,255)
    };
    Mat gray, smallImg;
    cvtColor( img, gray, COLOR_BGR2GRAY );
    double fx = 1 / scale;
    resize( gray, smallImg, Size(), fx, fx, INTER_LINEAR_EXACT );
    equalizeHist( smallImg, smallImg );
    t = (double)getTickCount();
    cascade.detectMultiScale( smallImg, faces,
        1.1, 2, 0
        //|CASCADE_FIND_BIGGEST_OBJECT
        //|CASCADE_DO_ROUGH_SEARCH
        |CASCADE_SCALE_IMAGE,
        Size(30, 30) );
    if( tryflip )
    {
        flip(smallImg, smallImg, 1);
        cascade.detectMultiScale( smallImg, faces2,
                                 1.1, 2, 0
                                 //|CASCADE_FIND_BIGGEST_OBJECT
                                 //|CASCADE_DO_ROUGH_SEARCH
                                 |CASCADE_SCALE_IMAGE,
                                 Size(30, 30) );
        for( vector<Rect>::const_iterator r = faces2.begin(); r != faces2.end(); ++r )
        {
            faces.push_back(Rect(smallImg.cols - r->x - r->width, r->y, r->width, r->height));
        }
    }
    t = (double)getTickCount() - t;
    printf( "detection time = %g ms\n", t*1000/getTickFrequency());
    for ( size_t i = 0; i < faces.size(); i++ )
    {
        Rect r = faces[i];
        Mat smallImgROI;
        vector<Rect> nestedObjects;
        Point center;
        Scalar color = colors[i%8];
        int radius;
        double aspect_ratio = (double)r.width/r.height;
        if( 0.75 < aspect_ratio && aspect_ratio < 1.3 )
        {
            center.x = cvRound((r.x + r.width*0.5)*scale);
            center.y = cvRound((r.y + r.height*0.5)*scale);
            radius = cvRound((r.width + r.height)*0.25*scale);
            circle( img, center, radius, color, 3, 8, 0 );
        }
        else
            rectangle( img, Point(cvRound(r.x*scale), cvRound(r.y*scale)),
                       Point(cvRound((r.x + r.width-1)*scale), cvRound((r.y + r.height-1)*scale)),
                       color, 3, 8, 0);
        if( nestedCascade.empty() )
            continue;
        smallImgROI = smallImg( r );
        nestedCascade.detectMultiScale( smallImgROI, nestedObjects,
            1.1, 2, 0
            //|CASCADE_FIND_BIGGEST_OBJECT
            //|CASCADE_DO_ROUGH_SEARCH
            //|CASCADE_DO_CANNY_PRUNING
            |CASCADE_SCALE_IMAGE,
            Size(30, 30) );
        for ( size_t j = 0; j < nestedObjects.size(); j++ )
        {
            Rect nr = nestedObjects[j];
            center.x = cvRound((r.x + nr.x + nr.width*0.5)*scale);
            center.y = cvRound((r.y + nr.y + nr.height*0.5)*scale);
            radius = cvRound((nr.width + nr.height)*0.25*scale);
            circle( img, center, radius, color, 3, 8, 0 );
        }
    }
    imshow( "result", img );
}

https://docs.opencv.org/master/db/d7c/group__face.html


The Eigenface Method

/*Eigenfaces in OpenCV*/
/*
 * Copyright (c) 2011. Philipp Wagner <bytefish[at]gmx[dot]de>.
 * Released to public domain under terms of the BSD Simplified license.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the organization nor the names of its contributors
 *     may be used to endorse or promote products derived from this software
 *     without specific prior written permission.
 *
 *   See <http://www.opensource.org/licenses/bsd-license>
 */
#include "opencv2/core.hpp"
#include "opencv2/face.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <fstream>
#include <sstream>
using namespace cv;
using namespace cv::face;
using namespace std;
static Mat norm_0_255(InputArray _src) {
    Mat src = _src.getMat();
    // Create and return normalized image:
    Mat dst;
    switch(src.channels()) {
    case 1:
        cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
        break;
    case 3:
        cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC3);
        break;
    default:
        src.copyTo(dst);
        break;
    }
    return dst;
}
static Mat norm_0_255(InputArray _src);
static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
    std::ifstream file(filename.c_str(), ifstream::in);
    if (!file) {
        string error_message = "No valid input file was given, please check the given filename.";
        CV_Error(Error::StsBadArg, error_message);
    }
    string line, path, classlabel;
    while (getline(file, line)) {
        stringstream liness(line);
        getline(liness, path, separator);
        getline(liness, classlabel);
        if(!path.empty() && !classlabel.empty()) {
            images.push_back(imread(path, 0));
            labels.push_back(atoi(classlabel.c_str()));
        }
    }
}
int main(int argc, const char *argv[]) {
    // Check for valid command line arguments, print usage
    // if no arguments were given.
    if (argc < 2) {
        cout << "usage: " << argv[0] << "  <output_folder> " << endl;
        exit(1);
    }
    string output_folder = ".";
    if (argc == 3) {
        output_folder = string(argv[2]);
    }
    // Get the path to your CSV.
    string fn_csv = string(argv[1]);
    // These vectors hold the images and corresponding labels.
    vector<Mat> images;
    vector<int> labels;
    // Read in the data. This can fail if no valid
    // input filename is given.
    try {
        read_csv(fn_csv, images, labels);
    } catch (cv::Exception& e) {
        cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
        // nothing more we can do
        exit(1);
    }
    // Quit if there are not enough images for this demo.
    if(images.size() <= 1) {
        string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
        CV_Error(Error::StsError, error_message);
    }
    // Get the height from the first image. We'll need this
    // later in code to reshape the images to their original
    // size:
    int height = images[0].rows;
    // The following lines simply get the last image from
    // your dataset and remove it from the vector. This is
    // done, so that the training data (which we learn the
    // cv::BasicFaceRecognizer on) and the test data we test
    // the model with, do not overlap.
    Mat testSample = images[images.size() - 1];
    int testLabel = labels[labels.size() - 1];
    images.pop_back();
    labels.pop_back();
    // The following lines create an Eigenfaces model for
    // face recognition and train it with the images and
    // labels read from the given CSV file.
    // This here is a full PCA, if you just want to keep
    // 10 principal components (read Eigenfaces), then call
    // the factory method like this:
    //
    //      EigenFaceRecognizer::create(10);
    //
    // If you want to create a FaceRecognizer with a
    // confidence threshold (e.g. 123.0), call it with:
    //
    //      EigenFaceRecognizer::create(10, 123.0);
    //
    // If you want to use _all_ Eigenfaces and have a threshold,
    // then call the method like this:
    //
    //      EigenFaceRecognizer::create(0, 123.0);
    //
    Ptr<EigenFaceRecognizer> model = EigenFaceRecognizer::create();
    model->train(images, labels);
    // The following line predicts the label of a given
    // test image:
    int predictedLabel = model->predict(testSample);
    //
    // To get the confidence of a prediction call the model with:
    //
    //      int predictedLabel = -1;
    //      double confidence = 0.0;
    //      model->predict(testSample, predictedLabel, confidence);
    //
    string result_message = format("Predicted class = %d / Actual class = %d.", predictedLabel, testLabel);
    cout << result_message << endl;
    // Here is how to get the eigenvalues of this Eigenfaces model:
    Mat eigenvalues = model->getEigenValues();
    // And we can do the same to display the Eigenvectors (read Eigenfaces):
    Mat W = model->getEigenVectors();
    // Get the sample mean from the training data
    Mat mean = model->getMean();
    // Display or save:
    if(argc == 2) {
        imshow("mean", norm_0_255(mean.reshape(1, images[0].rows)));
    } else {
        imwrite(format("%s/mean.png", output_folder.c_str()), norm_0_255(mean.reshape(1, images[0].rows)));
    }
    // Display or save the Eigenfaces:
    for (int i = 0; i < min(10, W.cols); i++) {
        string msg = format("Eigenvalue #%d = %.5f", i, eigenvalues.at<double>(i));
        cout << msg << endl;
        // get eigenvector #i
        Mat ev = W.col(i).clone();
        // Reshape to original size & normalize to [0...255] for imshow.
        Mat grayscale = norm_0_255(ev.reshape(1, height));
        // Show the image & apply a Jet colormap for better sensing.
        Mat cgrayscale;
        applyColorMap(grayscale, cgrayscale, COLORMAP_JET);
        // Display or save:
        if(argc == 2) {
            imshow(format("eigenface_%d", i), cgrayscale);
        } else {
            imwrite(format("%s/eigenface_%d.png", output_folder.c_str(), i), norm_0_255(cgrayscale));
        }
    }
    // Display or save the image reconstruction at some predefined steps:
    for(int num_components = min(W.cols, 10); num_components < min(W.cols, 300); num_components+=15) {
        // slice the eigenvectors from the model
        Mat evs = Mat(W, Range::all(), Range(0, num_components));
        Mat projection = LDA::subspaceProject(evs, mean, images[0].reshape(1,1));
        Mat reconstruction = LDA::subspaceReconstruct(evs, mean, projection);
        // Normalize the result:
        reconstruction = norm_0_255(reconstruction.reshape(1, images[0].rows));
        // Display or save:
        if(argc == 2) {
            imshow(format("eigenface_reconstruction_%d", num_components), reconstruction);
        } else {
            imwrite(format("%s/eigenface_reconstruction_%d.png", output_folder.c_str(), num_components), reconstruction);
        }
    }
    // Display if we are not writing to an output folder:
    if(argc == 2) {
        waitKey(0);
    }
    return 0;
}
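
For quick experiments, the same models are also exposed through the cv2.face module of the opencv-contrib-python bindings (FisherFaceRecognizer_create and LBPHFaceRecognizer_create follow the same pattern). A minimal sketch, assuming a handful of equal-sized grayscale images at hypothetical paths:

import cv2
import numpy as np

# Hypothetical image paths; every image must be grayscale and the same size.
paths = ["s1_1.pgm", "s1_2.pgm", "s2_1.pgm", "s2_2.pgm"]
images = [cv2.imread(p, cv2.IMREAD_GRAYSCALE) for p in paths]
labels = np.array([0, 0, 1, 1])

# Hold out the last image as a test sample, as the C++ demo above does.
model = cv2.face.EigenFaceRecognizer_create()   # create(10) would keep 10 components
model.train(images[:-1], labels[:-1])

label, confidence = model.predict(images[-1])
print("Predicted class = %d / Actual class = %d (confidence %.2f)" % (label, labels[-1], confidence))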

FisherFace (Linear Discriminant Analysis, LDA)

/*Fisherfaces in OpenCV*/
/*
 * Copyright (c) 2011. Philipp Wagner <bytefish[at]gmx[dot]de>.
 * Released to public domain under terms of the BSD Simplified license.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the organization nor the names of its contributors
 *     may be used to endorse or promote products derived from this software
 *     without specific prior written permission.
 *
 *   See <http://www.opensource.org/licenses/bsd-license>
 */
#include "opencv2/core.hpp"
#include "opencv2/face.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <fstream>
#include <sstream>
using namespace cv;
using namespace cv::face;
using namespace std;
static Mat norm_0_255(InputArray _src) {
    Mat src = _src.getMat();
    // Create and return normalized image:
    Mat dst;
    switch(src.channels()) {
    case 1:
        cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
        break;
    case 3:
        cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC3);
        break;
    default:
        src.copyTo(dst);
        break;
    }
    return dst;
}
static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
    std::ifstream file(filename.c_str(), ifstream::in);
    if (!file) {
        string error_message = "No valid input file was given, please check the given filename.";
        CV_Error(Error::StsBadArg, error_message);
    }
    string line, path, classlabel;
    while (getline(file, line)) {
        stringstream liness(line);
        getline(liness, path, separator);
        getline(liness, classlabel);
        if(!path.empty() && !classlabel.empty()) {
            images.push_back(imread(path, 0));
            labels.push_back(atoi(classlabel.c_str()));
        }
    }
}
int main(int argc, const char *argv[]) {
    // Check for valid command line arguments, print usage
    // if no arguments were given.
    if (argc < 2) {
        cout << "usage: " << argv[0] << "  <output_folder> " << endl;
        exit(1);
    }
    string output_folder = ".";
    if (argc == 3) {
        output_folder = string(argv[2]);
    }
    // Get the path to your CSV.
    string fn_csv = string(argv[1]);
    // These vectors hold the images and corresponding labels.
    vector<Mat> images;
    vector<int> labels;
    // Read in the data. This can fail if no valid
    // input filename is given.
    try {
        read_csv(fn_csv, images, labels);
    } catch (cv::Exception& e) {
        cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
        // nothing more we can do
        exit(1);
    }
    // Quit if there are not enough images for this demo.
    if(images.size() <= 1) {
        string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
        CV_Error(Error::StsError, error_message);
    }
    // Get the height from the first image. We'll need this
    // later in code to reshape the images to their original
    // size:
    int height = images[0].rows;
    // The following lines simply get the last image from
    // your dataset and remove it from the vector. This is
    // done, so that the training data (which we learn the
    // cv::BasicFaceRecognizer on) and the test data we test
    // the model with, do not overlap.
    Mat testSample = images[images.size() - 1];
    int testLabel = labels[labels.size() - 1];
    images.pop_back();
    labels.pop_back();
    // The following lines create a Fisherfaces model for
    // face recognition and train it with the images and
    // labels read from the given CSV file.
    // If you just want to keep 10 Fisherfaces, then call
    // the factory method like this:
    //
    //      FisherFaceRecognizer::create(10);
    //
    // However it is not useful to discard Fisherfaces! Please
    // always try to use _all_ available Fisherfaces for
    // classification.
    //
    // If you want to create a FaceRecognizer with a
    // confidence threshold (e.g. 123.0) and use _all_
    // Fisherfaces, then call it with:
    //
    //      FisherFaceRecognizer::create(0, 123.0);
    //
    Ptr<FisherFaceRecognizer> model = FisherFaceRecognizer::create();
    model->train(images, labels);
    // The following line predicts the label of a given
    // test image:
    int predictedLabel = model->predict(testSample);
    //
    // To get the confidence of a prediction call the model with:
    //
    //      int predictedLabel = -1;
    //      double confidence = 0.0;
    //      model->predict(testSample, predictedLabel, confidence);
    //
    string result_message = format("Predicted class = %d / Actual class = %d.", predictedLabel, testLabel);
    cout << result_message << endl;
    // Here is how to get the eigenvalues of this Fisherfaces model:
    Mat eigenvalues = model->getEigenValues();
    // And we can do the same to display the Eigenvectors (read Fisherfaces):
    Mat W = model->getEigenVectors();
    // Get the sample mean from the training data
    Mat mean = model->getMean();
    // Display or save:
    if(argc == 2) {
        imshow("mean", norm_0_255(mean.reshape(1, images[0].rows)));
    } else {
        imwrite(format("%s/mean.png", output_folder.c_str()), norm_0_255(mean.reshape(1, images[0].rows)));
    }
    // Display or save the first, at most 16 Fisherfaces:
    for (int i = 0; i < min(16, W.cols); i++) {
        string msg = format("Eigenvalue #%d = %.5f", i, eigenvalues.at<double>(i));
        cout << msg << endl;
        // get eigenvector #i
        Mat ev = W.col(i).clone();
        // Reshape to original size & normalize to [0...255] for imshow.
        Mat grayscale = norm_0_255(ev.reshape(1, height));
        // Show the image & apply a Bone colormap for better sensing.
        Mat cgrayscale;
        applyColorMap(grayscale, cgrayscale, COLORMAP_BONE);
        // Display or save:
        if(argc == 2) {
            imshow(format("fisherface_%d", i), cgrayscale);
        } else {
            imwrite(format("%s/fisherface_%d.png", output_folder.c_str(), i), norm_0_255(cgrayscale));
        }
    }
    // Display or save the image reconstruction at some predefined steps:
    for(int num_component = 0; num_component < min(16, W.cols); num_component++) {
        // Slice the Fisherface from the model:
        Mat ev = W.col(num_component);
        Mat projection = LDA::subspaceProject(ev, mean, images[0].reshape(1,1));
        Mat reconstruction = LDA::subspaceReconstruct(ev, mean, projection);
        // Normalize the result:
        reconstruction = norm_0_255(reconstruction.reshape(1, images[0].rows));
        // Display or save:
        if(argc == 2) {
            imshow(format("fisherface_reconstruction_%d", num_component), reconstruction);
        } else {
            imwrite(format("%s/fisherface_reconstruction_%d.png", output_folder.c_str(), num_component), reconstruction);
        }
    }
    // Display if we are not writing to an output folder:
    if(argc == 2) {
        waitKey(0);
    }
    return 0;
}

LBPH (Local Binary Patterns Histograms)

/*Local Binary Patterns Histograms in OpenCV*/
/*
 * Copyright (c) 2011. Philipp Wagner <bytefish[at]gmx[dot]de>.
 * Released to public domain under terms of the BSD Simplified license.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the organization nor the names of its contributors
 *     may be used to endorse or promote products derived from this software
 *     without specific prior written permission.
 *
 *   See <http://www.opensource.org/licenses/bsd-license>
 */
#include "opencv2/core.hpp"
#include "opencv2/face.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
#include <fstream>
#include <sstream>
using namespace cv;
using namespace cv::face;
using namespace std;
static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
    std::ifstream file(filename.c_str(), ifstream::in);
    if (!file) {
        string error_message = "No valid input file was given, please check the given filename.";
        CV_Error(Error::StsBadArg, error_message);
    }
    string line, path, classlabel;
    while (getline(file, line)) {
        stringstream liness(line);
        getline(liness, path, separator);
        getline(liness, classlabel);
        if(!path.empty() && !classlabel.empty()) {
            images.push_back(imread(path, 0));
            labels.push_back(atoi(classlabel.c_str()));
        }
    }
}
int main(int argc, const char *argv[]) {
    // Check for valid command line arguments, print usage
    // if no arguments were given.
    if (argc != 2) {
        cout << "usage: " << argv[0] << " " << endl;
        exit(1);
    }
    // Get the path to your CSV.
    string fn_csv = string(argv[1]);
    // These vectors hold the images and corresponding labels.
    vector<Mat> images;
    vector<int> labels;
    // Read in the data. This can fail if no valid
    // input filename is given.
    try {
        read_csv(fn_csv, images, labels);
    } catch (cv::Exception& e) {
        cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
        // nothing more we can do
        exit(1);
    }
    // Quit if there are not enough images for this demo.
    if(images.size() <= 1) {
        string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
        CV_Error(Error::StsError, error_message);
    }
    // The following lines simply get the last image from
    // your dataset and remove it from the vector. This is
    // done, so that the training data (which we learn the
    // cv::LBPHFaceRecognizer on) and the test data we test
    // the model with, do not overlap.
    Mat testSample = images[images.size() - 1];
    int testLabel = labels[labels.size() - 1];
    images.pop_back();
    labels.pop_back();
    // The following lines create an LBPH model for
    // face recognition and train it with the images and
    // labels read from the given CSV file.
    //
    // The LBPHFaceRecognizer uses Extended Local Binary Patterns
    // (it's probably configurable with other operators at a later
    // point), and has the following default values
    //
    //      radius = 1
    //      neighbors = 8
    //      grid_x = 8
    //      grid_y = 8
    //
    // So if you want a LBPH FaceRecognizer using a radius of
    // 2 and 16 neighbors, call the factory method with:
    //
    //      cv::face::LBPHFaceRecognizer::create(2, 16);
    //
    // And if you want a threshold (e.g. 123.0) call it with its default values:
    //
    //      cv::face::LBPHFaceRecognizer::create(1,8,8,8,123.0)
    //
    Ptr<LBPHFaceRecognizer> model = LBPHFaceRecognizer::create();
    model->train(images, labels);
    // The following line predicts the label of a given
    // test image:
    int predictedLabel = model->predict(testSample);
    //
    // To get the confidence of a prediction call the model with:
    //
    //      int predictedLabel = -1;
    //      double confidence = 0.0;
    //      model->predict(testSample, predictedLabel, confidence);
    //
    string result_message = format("Predicted class = %d / Actual class = %d.", predictedLabel, testLabel);
    cout << result_message << endl;
    // First we'll use it to set the threshold of the LBPHFaceRecognizer
    // to 0.0 without retraining the model. This can be useful if
    // you are evaluating the model:
    //
    model->setThreshold(0.0);
    // Now the threshold of this model is set to 0.0. A prediction
    // now returns -1, as it's impossible to have a distance below
    // it
    predictedLabel = model->predict(testSample);
    cout << "Predicted class = " << predictedLabel << endl;
    // Show some information about the model, as there's no cool
    // model data to display as in Eigenfaces/Fisherfaces.
    // Due to efficiency reasons the LBP images are not stored
    // within the model:
    cout << "Model Information:" << endl;
    string model_info = format("\tLBPH(radius=%i, neighbors=%i, grid_x=%i, grid_y=%i, threshold=%.2f)",
            model->getRadius(),
            model->getNeighbors(),
            model->getGridX(),
            model->getGridY(),
            model->getThreshold());
    cout << model_info << endl;
    // We could get the histograms for example:
    vector<Mat> histograms = model->getHistograms();
    // But should I really visualize it? Probably the length is interesting:
    cout << "Size of the histograms: " << histograms[0].total() << endl;
    return 0;
}
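
The constructor parameters described in the comments above map directly onto the Python bindings as well. A minimal sketch (hypothetical image paths, opencv-contrib-python) of an LBPH recognizer with radius 2, 16 neighbors, and a confidence threshold:

import cv2
import numpy as np

images = [cv2.imread(p, cv2.IMREAD_GRAYSCALE) for p in ["a1.png", "a2.png", "b1.png"]]  # hypothetical
labels = np.array([0, 0, 1])

# radius=2, neighbors=16, grid_x=8, grid_y=8, threshold=123.0
model = cv2.face.LBPHFaceRecognizer_create(2, 16, 8, 8, 123.0)
model.train(images, labels)

label, confidence = model.predict(images[0])
print(label, confidence)   # label is -1 when the best distance exceeds the threshold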

https://blog.csdn.net/zuidao3105/article/details/79346591

OpenCV mainly uses two kinds of features (that is, two methods) for face detection: Haar features and LBP features. Detection relies on classifiers that have already been trained and are shipped as XML files. In the data folder inside the sources folder of the OpenCV installation directory (opencv\sources\data\haarcascades) you can see the available cascade files:

The folder names "haarcascades", "hogcascades" and "lbpcascades" hold classifiers trained with "haar", "hog" and "lbp" features respectively: Haar features are mainly used for face detection, HOG features mainly for pedestrian detection, and LBP features mainly for face recognition; the "eye" cascades are used for eye detection.

Face detection is driven mainly by the detectMultiScale() function. Below is a brief explanation of its parameters, starting from the prototype:
CV_WRAP virtual void detectMultiScale( const Mat& image,  
                                   CV_OUT vector<Rect>& objects,  
                                   double scaleFactor=1.1,  
                                   int minNeighbors=3, int flags=0,  
                                   Size minSize=Size(),  
                                   Size maxSize=Size() ); 

Parameter meanings (a Python sketch follows this list):
const Mat& image: the image to scan (grayscale)
vector<Rect>& objects: receives the rectangles of the detected faces
double scaleFactor: how much the image is shrunk at each image-pyramid step
int minNeighbors: how many overlapping detections a face needs before it counts as a real face
int flags: whether to scale the classifier or to scale the image
Size(): the minimum and maximum face sizes to consider
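
A short sketch of the same call from Python, assuming an opencv-python build that ships the cv2.data helper for locating the bundled cascade files, and a hypothetical test.jpg:

import cv2

face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_alt.xml")
gray = cv2.cvtColor(cv2.imread("test.jpg"), cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)   # boost contrast before detection

faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3, minSize=(30, 30))
print("found %d face(s)" % len(faces))
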
/* Detect faces in a still image */
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/calib3d/calib3d.hpp>

using namespace std;
using namespace cv;

int main()
{
    Mat image, image_gray;      // two Mats: the input image and its grayscale version

    image = imread("F://1.png");
    imshow("original", image);

    cvtColor(image, image_gray, CV_BGR2GRAY);   // convert to grayscale
    equalizeHist(image_gray, image_gray);       // histogram equalization to boost contrast

    CascadeClassifier eye_Classifier;  // eye classifier
    CascadeClassifier face_cascade;    // face classifier

    // Load the pre-trained XML cascades that ship with OpenCV
    // (under opencv\sources\data\haarcascades); copy them to a path of your choice.
    if (!eye_Classifier.load("F:\\haarcascade_eye.xml"))
    {
        cout << "Load haarcascade_eye.xml failed!" << endl;
        return 0;
    }

    if (!face_cascade.load("F:\\haarcascade_frontalface_alt.xml"))
    {
        cout << "Load haarcascade_frontalface_alt failed!" << endl;
        return 0;
    }

    // vector is a class template and needs an explicit template argument;
    // vector<Rect> is the concrete instantiation used here.
    vector<Rect> eyeRect;
    vector<Rect> faceRect;

    // Detect eye locations
    eye_Classifier.detectMultiScale(image_gray, eyeRect, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
    for (size_t eyeIdx = 0; eyeIdx < eyeRect.size(); eyeIdx++)
    {
        rectangle(image, eyeRect[eyeIdx], Scalar(0, 0, 255));   // draw a rectangle around each detection
    }

    // Detect face locations
    face_cascade.detectMultiScale(image_gray, faceRect, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
    for (size_t i = 0; i < faceRect.size(); i++)
    {
        rectangle(image, faceRect[i], Scalar(0, 0, 255));       // draw a rectangle around each detection
    }

    imshow("face detection", image);    // show the result
    waitKey(0);

    return 0;
}

/* Detect faces from the webcam */
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/calib3d/calib3d.hpp>

using namespace std;
using namespace cv;

int main()
{
    Mat image, image_gray;      // two Mats: the current frame and its grayscale version
    VideoCapture capture(0);    // read video from the default camera

    while (1)                  // loop over every frame
    {
        capture >> image;     // grab the current frame

        //image = imread("F://1.png");
        //imshow("original", image);

        cvtColor(image, image_gray, CV_BGR2GRAY);   // convert to grayscale
        equalizeHist(image_gray, image_gray);       // histogram equalization to boost contrast

        // Note: loading the classifiers once before the loop would be more efficient;
        // they are kept here to match the original listing.
        CascadeClassifier eye_Classifier;  // eye classifier
        CascadeClassifier face_cascade;    // face classifier

        // Load the pre-trained XML cascades that ship with OpenCV
        // (under opencv\sources\data\haarcascades); copy them to a path of your choice.
        if (!eye_Classifier.load("F:\\haarcascade_eye.xml"))
        {
            cout << "Load haarcascade_eye.xml failed!" << endl;
            return 0;
        }

        if (!face_cascade.load("F:\\haarcascade_frontalface_alt.xml"))
        {
            cout << "Load haarcascade_frontalface_alt failed!" << endl;
            return 0;
        }

        vector<Rect> eyeRect;
        vector<Rect> faceRect;

        // Detect eye locations
        eye_Classifier.detectMultiScale(image_gray, eyeRect, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
        for (size_t eyeIdx = 0; eyeIdx < eyeRect.size(); eyeIdx++)
        {
            rectangle(image, eyeRect[eyeIdx], Scalar(0, 0, 255));   // draw a rectangle around each detection
        }

        // Detect face locations
        face_cascade.detectMultiScale(image_gray, faceRect, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
        for (size_t i = 0; i < faceRect.size(); i++)
        {
            rectangle(image, faceRect[i], Scalar(0, 0, 255));       // draw a rectangle around each detection
        }

        imshow("face detection", image);    // show the current frame
        char c = waitKey(30);               // wait 30 ms, roughly 33 fps
        if (c == 27)  break;                // ESC quits
    }

    return 0;
}

https://blog.csdn.net/sinat_26917383/article/details/69831495

Fundamentals of the Haar cascade detection algorithm

https://blog.csdn.net/shadow_guo/article/details/44114421

Tuning the parameters of the Haar-feature AdaBoost cascade classifier for object detection


2. Cloud APIs

Baidu AI: https://ai.baidu.com/docs#/Face-Detect/top (face qualities/landmark72)

Face++:https://console.faceplusplus.com.cn/documents/4888373(HumanBody/Gesture)

MS:https://docs.microsoft.com/en-us/azure/cognitive-services/face/overview(Face detection/Face landmarks/Head pose/Face attributes)

3. Open Libraries

https://github.com/TadasBaltrusaitis/OpenFace


https://github.com/ageitgey/face_recognition (built on dlib: http://dlib.net)

import face_recognition

# Load a known, labeled picture and an unknown picture
known_image = face_recognition.load_image_file("biden.jpg")
unknown_image = face_recognition.load_image_file("unknown.jpg")

# Compute a 128-dimension encoding for the first face found in each image
biden_encoding = face_recognition.face_encodings(known_image)[0]
unknown_encoding = face_recognition.face_encodings(unknown_image)[0]

# Returns a list with one True/False per known encoding
results = face_recognition.compare_faces([biden_encoding], unknown_encoding)

https://github.com/seetaface/SeetaFaceEngine


http://www.arcsoft.com.cn/ai/arcface.html

https://github.com/accord-net/framework

http://www.aforgenet.com/framework/

http://www.emgu.com/wiki/index.php/Main_Page


4. Face Recognition with Deep Learning

https://github.com/mbhybird/facenet

https://github.com/mbhybird/openface

Multi-task Cascaded Convolutional Networks (MTCNN)

 http://www.javashuo.com/article/p-wtdquuzn-gm.html

Step 1: Finding all the faces

The first step in our pipeline is face detection. Obviously, we need to locate the faces in a picture before we can try to tell them apart.

If you have used any camera in the last ten years, you have probably seen face detection in action:

Face detection is a great feature for cameras. When the camera can automatically pick out faces, it can make sure all the faces are in focus before the picture is taken. We will use it for a different purpose, though: finding the areas of the image that we want to pass on to the next step of the pipeline.

Face detection went mainstream in the early 2000s, when Paul Viola and Michael Jones invented a way to detect faces fast enough to run on cheap cameras. However, much more reliable solutions now exist. Here we use a method invented in 2005 called the Histogram of Oriented Gradients, or HOG for short.

To find faces in an image, we start by making the image black and white, because we do not need color data to find faces.

Then we look at every single pixel in the image, one at a time. For each pixel, we also look at the pixels that directly surround it:

Our goal is to figure out how dark the current pixel is compared with the pixels directly surrounding it, and to draw an arrow showing the direction in which the image is getting darker:

If you repeat this process for every pixel in the image, every pixel ends up replaced by an arrow. These arrows are called gradients, and they show the flow from light to dark across the entire image:

This might seem like a random thing to do, but there is a very good reason for it. If we analyze pixels directly, two photos of the same person under different lighting will have totally different pixel values. But if we only consider the direction in which brightness changes, both the dark and the bright photo end up with exactly the same representation. That makes the problem much easier to solve!

But saving the gradient for every single pixel gives us far too much detail; we would miss the forest for the trees. It is better to look at the basic flow of lightness and darkness at a higher level, so that we can see the basic pattern of the image.

To do this, we break the image up into small squares of 16x16 pixels each. In each square, we count how many gradients point in each major direction (how many point up, up-right, right, and so on), and then we replace that square with an arrow in the direction that was strongest.

The end result is that we turn the original image into a very simple representation that captures the basic structure of a face:

The original image is turned into a HOG representation that captures the major features of the image regardless of image brightness.

To find faces in this HOG image, all we have to do is find the part of our image that looks most similar to a known HOG pattern extracted from a large number of training faces:

Using this technique, we can now easily find faces in any image:
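
As a concrete illustration, dlib's HOG face detector is exposed through the face_recognition library that appears earlier in this post; a minimal sketch, assuming a hypothetical local test.jpg:

import face_recognition

image = face_recognition.load_image_file("test.jpg")
# model="hog" selects dlib's HOG detector, i.e. the method described above
face_locations = face_recognition.face_locations(image, model="hog")
print("Found %d face(s): %s" % (len(face_locations), face_locations))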


Step 2: Posing and projecting faces

Whew, we have isolated the faces in our image. But now we have to deal with the problem that, to a computer, the same face turned in different directions looks completely different:

Humans can easily recognize that both images show the same person, but a computer would see two completely different people.

To account for this, we will try to warp each picture so that the eyes and lips are always in the same sample place in the image. This will make it much easier to compare faces in the next steps.

To do this, we will use an algorithm called face landmark estimation. There are many ways to do this, but this time we will use the approach invented in 2014 by Vahid Kazemi and Josephine Sullivan.

The basic idea is to pick 68 specific points (called landmarks) that exist on every face: the top of the chin, the outside edge of each eye, the inner edge of each eyebrow, and so on. We then train a machine learning algorithm to be able to find these 68 specific points on any face:

The 68 landmarks we will locate on every face. This image was created by Brandon Amos, a Ph.D. student at Carnegie Mellon University who works on OpenFace.

Here is the result of locating the 68 landmarks on our test image:

You can also use this same technique to implement your own Snapchat-style real-time 3D face filters!

Now that we know where the eyes and mouth are, we simply rotate, scale and shear the image so that the eyes and mouth are centered as well as possible. We will not do any fancy 3D warping, because that would distort the image. We only use basic transformations such as rotation and scaling that keep parallel lines parallel (so-called affine transformations):

Now, no matter which way the face is turned, we can center the eyes and mouth at roughly the same position in the image. This will make the next step much more accurate.
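
A minimal sketch of landmark estimation using the face_recognition wrapper around dlib's implementation of the Kazemi-Sullivan method (test.jpg is again a hypothetical input):

import face_recognition

image = face_recognition.load_image_file("test.jpg")
# One dict per detected face, with keys such as "chin", "left_eye", "top_lip", ...
for landmarks in face_recognition.face_landmarks(image):
    for feature, points in landmarks.items():
        print(feature, points[:3], "...")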

Step 3: Encoding faces

Now we come to the core of the problem: actually telling different faces apart. This is where things get really interesting!

The simplest approach to face recognition would be to directly compare the unknown face found in Step 2 with every picture of people that has already been tagged. If a previously tagged face looks extremely similar to our unknown face, it must be the same person. Seems like a pretty good idea, right?

There is actually a huge problem with that approach. A site like Facebook, with billions of users and trillions of photos, cannot possibly loop over every previously tagged face to compare it with each newly uploaded picture; that would take far too long. Faces need to be recognized in milliseconds, not hours.

What we need is a way to extract a few basic measurements from each face. Then we can measure an unknown face the same way and find the known face with the closest measurements. For example, we might measure the size of each ear, the spacing between the eyes, the length of the nose, and so on. If you have ever watched a show like CSI, you know what I am talking about.

The most reliable way to measure a face

OK, so which measurements should we collect from each face to build our known-face database? Ear size? Nose length? Eye color? Something else?

It turns out that the measurements that seem obvious to us humans (such as eye color) do not really make sense to a computer. Researchers have found that the most accurate approach is to let the computer figure out which measurements to collect by itself. Deep learning is better than humans at working out which parts of a face are important to measure.

So the solution is to train a deep convolutional neural network. But instead of training it to recognize objects in pictures, this time we train it to generate 128 measurements for each face.

Each training step looks at three different face images:

1. Load a training face image of a known person

2. Load another picture of the same known person

3. Load a picture of a totally different person

The algorithm then looks at the measurements it is currently generating for each of those three images, and tweaks the neural network slightly so that the measurements for the first and second image come out a little closer while those for the second and third image come out a little further apart.
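
This "pull together, push apart" objective is commonly formalized as a triplet loss. A minimal numpy sketch of the idea (the margin value is illustrative, not taken from the original article):

import numpy as np

def triplet_loss(anchor, positive, negative, margin=0.2):
    # anchor/positive are 128-d embeddings of the same person; negative is someone else
    pos_dist = np.sum((anchor - positive) ** 2)   # squared distance, same person
    neg_dist = np.sum((anchor - negative) ** 2)   # squared distance, different person
    # Zero loss once the negative is at least `margin` further away than the positive.
    return max(pos_dist - neg_dist + margin, 0.0)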


After repeating this step millions of times, for millions of images of thousands of different people, the neural network learns to reliably generate 128 measurements for each person. Any ten different pictures of the same person should give roughly the same measurements.

Machine learning people call these 128 measurements of each face an embedding. The idea of reducing complicated raw data such as a picture to a list of computer-generated numbers comes up a lot in machine learning (especially in language translation). The exact approach for faces that we are using was invented by researchers at Google in 2015, but many similar approaches exist.

Encoding our face image

This process of training a convolutional neural network to output face embeddings requires a lot of data and computing power. Even with an expensive NVIDIA Tesla video card, it takes about 24 hours of continuous training to get good accuracy.

But once the network has been trained, it can generate measurements for any face, even one it has never seen before! So this training only has to be done once. Luckily for us, the fine folks at OpenFace have already done this, and they published several trained networks that we can use directly. Thanks, Brandon Amos and team!

So all we need to do is run our face images through their pre-trained network to get the 128 measurements for each face. Here are the measurements for our test image:

So which parts of the face do these 128 numbers actually measure? It turns out that we have no idea, and it does not really matter to us. All we care about is that the network produces nearly the same numbers when it looks at two different pictures of the same person.
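
That "nearly the same numbers" property is easy to check with the face_recognition library; a minimal sketch comparing two hypothetical photos of the same person:

import face_recognition

enc_a = face_recognition.face_encodings(face_recognition.load_image_file("person_1a.jpg"))[0]
enc_b = face_recognition.face_encodings(face_recognition.load_image_file("person_1b.jpg"))[0]
# face_distance returns one Euclidean distance per known encoding; by the
# library's convention, distances under roughly 0.6 usually mean the same person.
print(face_recognition.face_distance([enc_a], enc_b))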


Step 4: Finding the person's name from the encoding

This last step is actually the easiest one in the whole process. All we have to do is find the person in our database of known people whose measurements are closest to those of our test image.

You can do this with any basic machine learning classification algorithm; no fancy deep learning tricks are needed. We will use a simple linear SVM classifier, but many other classification algorithms would work as well.

All we need to do is train a classifier that takes the measurements from a new test image and tells us which known person is the closest match. Running this classifier takes milliseconds, and its output is the person's name!
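
A sketch of this step using face_recognition plus scikit-learn (the photo file names are placeholders, not the article's actual data):

import face_recognition
from sklearn import svm

encodings, names = [], []
for path, name in [("will_1.jpg", "Will Ferrell"), ("chad_1.jpg", "Chad Smith"),
                   ("jimmy_1.jpg", "Jimmy Fallon")]:   # hypothetical labeled photos
    image = face_recognition.load_image_file(path)
    encodings.append(face_recognition.face_encodings(image)[0])
    names.append(name)

clf = svm.SVC(kernel="linear")
clf.fit(encodings, names)

unknown = face_recognition.load_image_file("unknown.jpg")   # hypothetical test frame
print(clf.predict([face_recognition.face_encodings(unknown)[0]]))   # best-matching name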


So let's try out our system. First, I trained a classifier with the embeddings of about 20 pictures each of Will Ferrell, Chad Smith and Jimmy Fallon:

Sweet, sweet training data!

Then I ran the classifier on every frame of the video of Will Ferrell and Chad Smith impersonating each other on Jimmy Fallon's show:

https://cdn-images-1.medium.com/max/800/1*_GNyjR3JlPoS9grtIVmKFQ.gif


Summary:

1. Encode a picture using the HOG algorithm to create a simplified version of the image. Using this simplified image, find the part of it that most looks like a generic HOG encoding of a face.

2. Figure out the pose of the face by finding the main landmarks in it. Once we find those landmarks, use them to warp the image so that the eyes and mouth are centered.

3. Pass the centered face image through a neural network that knows how to measure 128 features of the face, and save those 128 measurements.

4. Looking at all the faces we have measured in the past, find the person whose measurements are closest to our face's measurements. That is your match!


5. Object Detection

https://pjreddie.com/darknet/yolo/

https://github.com/allanzelener/YAD2K

# Installation
git clone https://github.com/allanzelener/yad2k.git
cd yad2k

# [Option 1] To replicate the conda environment:
conda env create -f environment.yml
source activate yad2k
# [Option 2] Install everything globally.
pip install numpy h5py pillow
pip install tensorflow-gpu  # CPU-only: conda install -c conda-forge tensorflow
pip install keras # Possibly older release: conda install keras

# Quick Start
wget http://pjreddie.com/media/files/yolo.weights
wget https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolo.cfg
./yad2k.py yolo.cfg yolo.weights model_data/yolo.h5
./test_yolo.py model_data/yolo.h5  # output in images/out/

https://github.com/mbhybird/keras-yolo3

# Quick Start
wget https://pjreddie.com/media/files/yolov3.weights
python convert.py yolov3.cfg yolov3.weights model_data/yolo.h5
python yolo.py
or 
python yolo_video.py