Configuring OpenCV on iOS and Face Detection

As someone with an insatiable curiosity, I always want to dig into whatever is unknown to me.

So I put together a face-detection demo.

There are very few Chinese articles on OpenCV for iOS right now, and most of them copy from one another, usually only a fragment at that. They are also quite old and no longer match the current tooling.

No matter; I'm a practical person, so let's get straight to it. Along the way I consulted many Chinese OpenCV articles, and the code is partly based on http://m.blog.csdn.net/blog/u013810454/27868973, which you are welcome to read, although the project downloadable from there has problems.

This demo combines the best parts of all of them and is more complete, covering everything from configuration to usage.

First, let's configure OpenCV in the Xcode project.

1. Download the iOS framework, opencv2.framework, from the official OpenCV website.

Then drag it directly into the project you created earlier.

(The remaining setup steps were shown here as screenshots of the Xcode project; the images are not reproduced.)

With that, the main configuration is done, and it is time to show the real technique. Of course, don't forget to rename the .m file to .mm so that C++ can be used.
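
Before writing the detector it is worth a quick sanity check that the framework is actually linked. This is only a minimal sketch (the function name is mine): in any .mm file, import the OpenCV umbrella header before the Apple headers, so OpenCV's macros don't clash with the SDK's, and log the version constant.

#import <opencv2/opencv.hpp>   // must come before UIKit/Foundation headers
#import <Foundation/Foundation.h>

static void LogOpenCVVersion(void)
{
    // CV_VERSION is defined by opencv2; if this compiles, links and prints,
    // the framework is integrated correctly.
    NSLog(@"OpenCV version: %s", CV_VERSION);
}

If that prints the expected version number, the framework is wired up correctly; the full view controller follows.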

#import "ViewController.h"
#import <Foundation/Foundation.h>


// minNeighbors for cvHaarDetectObjects: higher values give fewer but more reliable detections
int currentvalue = 9;

@interface ViewController ()<UIImagePickerControllerDelegate,UINavigationControllerDelegate>

{
    // shows the selected photo and, later, the detection result
    UIImageView *_imageView;
    // the image picked from the photo library
    UIImage *image;
    // makes sure the photo picker is presented only once
    BOOL pickerShown;
}
@end

@implementation ViewController

- (void)viewDidLoad {
    [super viewDidLoad];
    // Do any additional setup after loading the view.
    self.view.backgroundColor = [UIColor whiteColor];

    [self createButton];

    // a UIImageView to display the selected picture
    _imageView = [[UIImageView alloc] initWithFrame:CGRectMake(50, 100, 300, 400)];
    [self.view addSubview:_imageView];
}

// Present the photo picker once the view is actually on screen; presenting from
// viewDidLoad fails because the view is not yet in the window hierarchy.
- (void)viewDidAppear:(BOOL)animated {
    [super viewDidAppear:animated];
    if (pickerShown) {
        return;
    }
    pickerShown = YES;

    // create a UIImagePickerController
    UIImagePickerController *ctrl = [[UIImagePickerController alloc] init];
    // pick from the photo library
    ctrl.sourceType = UIImagePickerControllerSourceTypePhotoLibrary;
    // set the delegate
    ctrl.delegate = self;
    // present it
    [self presentViewController:ctrl animated:YES completion:nil];
}



#pragma mark - UIImagePickerController delegate
-(void)imagePickerController:(UIImagePickerController *)picker didFinishPickingMediaWithInfo:(NSDictionary *)info
{
    // grab the selected image
    image = info[UIImagePickerControllerOriginalImage];

    UIImageOrientation imageOrientation=image.imageOrientation;
    if(imageOrientation!=UIImageOrientationUp)
    {
        // The photo carries an orientation flag from the camera, but the raw bitmap ignores it,
        // so the picked image can come out rotated 90 degrees.
        // Redraw it to bake the orientation into the pixels:
        UIGraphicsBeginImageContext(image.size);
        [image drawInRect:CGRectMake(0, 0, image.size.width, image.size.height)];
        image = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();
        // orientation fix complete
    }
    // show the picked image
    _imageView.image = image;
    
    [picker dismissViewControllerAnimated:YES completion:nil];
    
    
}

-(void)imagePickerControllerDidCancel:(UIImagePickerController *)picker
{
    [picker dismissViewControllerAnimated:YES completion:nil];
}

// convert a UIImage into OpenCV's IplImage format (returns a 3-channel BGR image)
- (IplImage *)CreateIplImageFromUIImage:(UIImage *)image {
    CGImageRef imageRef = image.CGImage;
    
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    IplImage *iplimage = cvCreateImage(cvSize(image.size.width, image.size.height), IPL_DEPTH_8U, 4);
    CGContextRef contextRef = CGBitmapContextCreate(iplimage->imageData, iplimage->width, iplimage->height,
                                                    iplimage->depth, iplimage->widthStep,
                                                    colorSpace, kCGImageAlphaPremultipliedLast|kCGBitmapByteOrderDefault);
    CGContextDrawImage(contextRef, CGRectMake(0, 0, image.size.width, image.size.height), imageRef);
    CGContextRelease(contextRef);
    CGColorSpaceRelease(colorSpace);
    
    // drop the alpha channel and convert RGBA to BGR, OpenCV's native channel order
    IplImage *ret = cvCreateImage(cvGetSize(iplimage), IPL_DEPTH_8U, 3);
    cvCvtColor(iplimage, ret, CV_RGBA2BGR);
    cvReleaseImage(&iplimage);
    
    return ret;
}
- (void)opencvFaceDetect {

    UIImage *img = image;   // UIImage is immutable, so no copy is needed
    if (img) {

        cvSetErrMode(CV_ErrModeParent);
        IplImage *iplImg = [self CreateIplImageFromUIImage:img];

        // convert to grayscale first
        IplImage *grayImg = cvCreateImage(cvGetSize(iplImg), IPL_DEPTH_8U, 1);
        cvCvtColor(iplImg, grayImg, CV_BGR2GRAY);

        // shrink the input by a factor of 4 to speed up detection
        int scale = 4;
        IplImage *small_image = cvCreateImage(cvSize(iplImg->width/scale, iplImg->height/scale), IPL_DEPTH_8U, 1);
        cvResize(grayImg, small_image);

        // load the Haar cascade classifier
        NSString *path = [[NSBundle mainBundle] pathForResource:@"haarcascade_frontalface_alt2" ofType:@"xml"];
        CvHaarClassifierCascade *cascade = (CvHaarClassifierCascade *)cvLoad([path cStringUsingEncoding:NSASCIIStringEncoding], NULL, NULL, NULL);
        if (!cascade) {
            NSLog(@"Could not load haarcascade_frontalface_alt2.xml - is it in the app bundle?");
            cvReleaseImage(&small_image);
            cvReleaseImage(&grayImg);
            cvReleaseImage(&iplImg);
            return;
        }
        CvMemStorage *storage = cvCreateMemStorage(0);
        cvClearMemStorage(storage);

        // the key step: cvHaarDetectObjects returns a sequence of face rectangles
        // (scale factor 1.1, minNeighbors = currentvalue, Canny pruning to skip flat regions)
        CvSeq *faces = cvHaarDetectObjects(small_image, cascade, storage, 1.1, currentvalue, CV_HAAR_DO_CANNY_PRUNING, cvSize(0,0), cvSize(0,0));

        NSLog(@"faces:%d", faces->total);

        // create a bitmap context, redraw the original image and stroke a box around each face
        CGImageRef imageRef = img.CGImage;
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
        CGContextRef contextRef = CGBitmapContextCreate(NULL, img.size.width, img.size.height, 8, img.size.width * 4, colorSpace, kCGImageAlphaPremultipliedLast|kCGBitmapByteOrderDefault);

        CGContextDrawImage(contextRef, CGRectMake(0, 0, img.size.width, img.size.height), imageRef);

        CGContextSetLineWidth(contextRef, 4);
        CGContextSetRGBStrokeColor(contextRef, 1.0, 0.0, 0.0, 1);

        // mark each face: scale the detected rect back up and convert it into the context's coordinate space
        for (int i = 0; i < faces->total; i++) {
            CvRect cvrect = *(CvRect *)cvGetSeqElem(faces, i);
            CGRect face_rect = CGContextConvertRectToDeviceSpace(contextRef, CGRectMake(cvrect.x*scale, cvrect.y*scale, cvrect.width*scale, cvrect.height*scale));
            CGContextStrokeRect(contextRef, face_rect);
        }

        // build the result image, then release the Quartz and OpenCV resources
        CGImageRef resultRef = CGBitmapContextCreateImage(contextRef);
        UIImage *result = [UIImage imageWithCGImage:resultRef];
        CGImageRelease(resultRef);
        CGContextRelease(contextRef);
        CGColorSpaceRelease(colorSpace);

        cvReleaseMemStorage(&storage);
        cvReleaseHaarClassifierCascade(&cascade);
        cvReleaseImage(&small_image);
        cvReleaseImage(&grayImg);
        cvReleaseImage(&iplImg);

        // this method runs on a background thread, so hop back to the main thread for UIKit
        dispatch_async(dispatch_get_main_queue(), ^{
            _imageView.image = result;
        });
    }
}


// detection takes a moment, so run it off the main thread
- (void)btn
{
    [NSThread detachNewThreadSelector:@selector(opencvFaceDetect) toTarget:self withObject:nil];
}



- (void)createButton
{
    // a small red button that triggers face detection
    UIButton *btn = [[UIButton alloc] init];
    btn.backgroundColor = [UIColor redColor];
    btn.frame = CGRectMake(0, 100, 30, 30);
    [btn addTarget:self action:@selector(btn) forControlEvents:UIControlEventTouchUpInside];
    [self.view addSubview:btn];
}
@end
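
One assumption in the listing: haarcascade_frontalface_alt2.xml is not generated by the code. It ships with OpenCV (in the data/haarcascades folder of the source distribution) and has to be added to the app target, otherwise pathForResource: returns nil and the cascade guard above will fire.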

OK, we can now detect faces.
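
The listing uses OpenCV's old C API (IplImage, cvHaarDetectObjects), which matches the opencv2.framework builds of that time. With a newer framework the same detection can be written against the C++ API. The sketch below is only an outline under that assumption: the method name is mine, and the UIImageToMat / MatToUIImage helpers live in <opencv2/highgui/ios.h> on 2.4.x builds and in <opencv2/imgcodecs/ios.h> on 3.x.

#import <opencv2/opencv.hpp>
#import <opencv2/highgui/ios.h>   // UIImageToMat / MatToUIImage (2.4.x; use imgcodecs/ios.h on 3.x)

- (UIImage *)detectFacesWithCppAPI:(UIImage *)input
{
    cv::Mat rgba;
    UIImageToMat(input, rgba);                      // RGBA cv::Mat from the UIImage

    cv::Mat gray;
    cv::cvtColor(rgba, gray, cv::COLOR_RGBA2GRAY);
    cv::equalizeHist(gray, gray);                   // helps detection under uneven lighting

    NSString *path = [[NSBundle mainBundle] pathForResource:@"haarcascade_frontalface_alt2"
                                                     ofType:@"xml"];
    cv::CascadeClassifier cascade;
    if (!path || !cascade.load(path.UTF8String)) {
        return input;                               // cascade missing: return the image untouched
    }

    std::vector<cv::Rect> faces;
    cascade.detectMultiScale(gray, faces, 1.1,
                             3,                     // minNeighbors (the listing above uses currentvalue = 9)
                             0, cv::Size(30, 30));

    for (size_t i = 0; i < faces.size(); i++) {
        cv::rectangle(rgba, faces[i], cv::Scalar(255, 0, 0, 255), 4);   // red box around each face
    }
    return MatToUIImage(rgba);
}

Either version can be called from the same background thread, with the resulting UIImage assigned to _imageView back on the main thread, as above.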


Isn't it neat, and quite a bit of fun? Go ahead and try it yourself.
