看網上方法不少,但版本都不夠新,我看了網上一些知識,總結了下,來個最新版 Xcode 6.1 的。
最近主要想作iOS端的車牌識別,因此開始瞭解OpenCV。有興趣的能夠跟我交流下哈。
一.Opencv的使用:
步驟:
1.從官網下載iOS版本的Opencv2.framework。
2.拖進工程,選擇copy items if needed。
3.進入Build Settings,設置Framework Search Path:
設置成$(PROJECT_DIR)/Newtest,這個Newtest是你的項目名,主要是爲了定位到你存放的Opencv2.framework所在位置。
4.使用Opencv的方式:第(1)種全局pch:(不推薦)新建pch文件,修改爲:
#ifdef __cplusplus
#import <opencv2/opencv.hpp>
#endif
並在Build Settings裏的 Increase Sharing of Precompiled Headers 項目處:
設置成$(PROJECT_DIR)/Newtest,同理,這個Newtest是你的項目名,主要是爲了定位到你存放的PCH文件所在位置。
PCH文件之前建工程默認生成,是全局性質的import。Xcode6再也不自動生成。蘋果引導開發者在某個類要用時才用。
第(2)種:在須要的地方#import <opencv2/opencv.hpp>
這裏的重點是:使用opencv的類,其實現文件的擴展名必定要改爲.mm!!
好比你專門寫了各一個處理圖片的類,Imageprocess。能夠在.h里加入。
二:灰度化和二值化的主要實現過程:
其實過程就是這樣:
UIImage(iOS圖像類)-> cv::Mat(OpenCV圖像類) -> Opencv灰度或二值處理函數 -> UIImage
三:Opencv類Imageprocess代碼參考:
Imageprocess.h
//
//  Imageprocess.h
//  Chepaishibie
//
//  Created by shen on 15/1/28.
//  Copyright (c) 2015年 shen. All rights reserved.
//

#import <Foundation/Foundation.h>
#import <opencv2/opencv.hpp>
#import <UIKit/UIKit.h>

// Image-processing helper exposing UIImage <-> cv::Mat / IplImage
// conversions plus grayscale and binarization operations, used for
// license-plate recognition preprocessing.
// NOTE(review): declared as a UIViewController subclass although it only
// performs image conversions — presumably used directly as a controller
// elsewhere; confirm before changing the superclass.
@interface Imageprocess : UIViewController

// Converts a UIImage into an 8-bit, 4-channel cv::Mat.
- (cv::Mat)cvMatFromUIImage:(UIImage *)image;

// Converts a cv::Mat (1-channel gray or multi-channel) back into a UIImage.
- (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat;

// Converts a UIImage into a newly allocated IplImage.
// NOTE: the caller owns the result and must cvReleaseImage() it.
- (IplImage *)CreateIplImageFromUIImage:(UIImage *)image;

// Converts an IplImage back into a UIImage.
// NOTE: the image should be in RGB channel order before calling.
- (UIImage *)UIImageFromIplImage:(IplImage *)image;

// Returns a grayscale version of the source image.
- (UIImage *)Grayimage:(UIImage *)srcimage;

// Returns a binarized (black/white) version of the source image,
// thresholded with Otsu's method.
- (UIImage *)Erzhiimage:(UIImage *)srcimage;

// Computes a global threshold for a contiguous 8-bit grayscale buffer
// using Otsu's method. Returns -1 on invalid arguments.
int Otsu(unsigned char* pGrayImg , int iWidth , int iHeight);

@end
Imageprocess.mm 裏面包含了不少函數:
主要是 UIImage->cv::Mat ,cv::Mat->UIImage,UIImage->IplImage,IplImage->UIImage, 灰度化,二值化等,還有個OSTU計算閾值的方法。
//
//  Imageprocess.mm
//  Chepaishibie
//
//  Created by shen on 15/1/28.
//  Copyright (c) 2015年 shen. All rights reserved.
//

#import "Imageprocess.h"

@implementation Imageprocess

#pragma mark - opencv method

// UIImage -> cv::Mat (8-bit, 4-channel RGBA: CoreGraphics draws RGB with the
// alpha byte skipped in the last position).
- (cv::Mat)cvMatFromUIImage:(UIImage *)image {
    CGFloat cols = image.size.width;
    CGFloat rows = image.size.height;

    // FIX: the original obtained the colorspace with CGImageGetColorSpace()
    // (a Get, not Create/Copy — not owned by us) and then released it, an
    // over-release. Creating our own device RGB space is correct to release
    // and also guarantees a colorspace compatible with the RGBA bitmap below
    // even when the source image is grayscale or indexed.
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();

    cv::Mat cvMat(rows, cols, CV_8UC4);  // 8 bits per component, 4 channels

    CGContextRef contextRef =
        CGBitmapContextCreate(cvMat.data,     // backing buffer: the Mat's data
                              cols,           // width in pixels
                              rows,           // height in pixels
                              8,              // bits per component
                              cvMat.step[0],  // bytes per row
                              colorSpace,
                              kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault);
    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
    CGContextRelease(contextRef);
    CGColorSpaceRelease(colorSpace);

    return cvMat;
}

// cv::Mat -> UIImage. Handles 1-channel (gray) and multi-channel mats.
- (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat {
    // Copy the pixel data so the CGImage does not dangle after the Mat dies.
    NSData *data = [NSData dataWithBytes:cvMat.data
                                  length:cvMat.elemSize() * cvMat.total()];

    CGColorSpaceRef colorSpace;
    CGBitmapInfo bitmapInfo;
    if (cvMat.elemSize() == 1) {
        colorSpace = CGColorSpaceCreateDeviceGray();
        bitmapInfo = kCGImageAlphaNone | kCGBitmapByteOrderDefault;
    } else {
        colorSpace = CGColorSpaceCreateDeviceRGB();
        // FIX: the original always passed kCGImageAlphaNone, which is not a
        // valid alpha mode for 32 bits per pixel; a 4-channel mat must mark
        // the trailing byte as skipped alpha.
        bitmapInfo = (cvMat.elemSize() == 4)
                         ? (kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault)
                         : (kCGImageAlphaNone | kCGBitmapByteOrderDefault);
    }

    CGDataProviderRef provider =
        CGDataProviderCreateWithCFData((__bridge CFDataRef)data);

    CGImageRef imageRef = CGImageCreate(cvMat.cols,            // width
                                        cvMat.rows,            // height
                                        8,                     // bits per component
                                        8 * cvMat.elemSize(),  // bits per pixel
                                        cvMat.step[0],         // bytes per row
                                        colorSpace,
                                        bitmapInfo,
                                        provider,
                                        NULL,                  // decode array
                                        false,                 // should interpolate
                                        kCGRenderingIntentDefault);

    UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);

    return finalImage;
}

// UIImage -> IplImage (3-channel BGR for OpenCV's C API).
// NOTE: the caller owns the result and must cvReleaseImage() it.
- (IplImage *)CreateIplImageFromUIImage:(UIImage *)image {
    CGImageRef imageRef = image.CGImage;
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();

    // Temporary 4-channel image used as the CGContext backing store.
    IplImage *iplimage = cvCreateImage(cvSize(image.size.width, image.size.height),
                                       IPL_DEPTH_8U, 4);
    CGContextRef contextRef =
        CGBitmapContextCreate(iplimage->imageData,
                              iplimage->width,
                              iplimage->height,
                              iplimage->depth,      // IPL_DEPTH_8U == 8 bits/component
                              iplimage->widthStep,  // bytes per row incl. padding
                              colorSpace,
                              kCGImageAlphaPremultipliedLast | kCGBitmapByteOrderDefault);
    CGContextDrawImage(contextRef,
                       CGRectMake(0, 0, image.size.width, image.size.height),
                       imageRef);
    CGContextRelease(contextRef);
    CGColorSpaceRelease(colorSpace);

    // Strip the alpha channel and reorder RGBA -> BGR.
    IplImage *ret = cvCreateImage(cvGetSize(iplimage), IPL_DEPTH_8U, 3);
    cvCvtColor(iplimage, ret, CV_RGBA2BGR);
    cvReleaseImage(&iplimage);
    return ret;
}

// IplImage -> UIImage.
// NOTE: the image should be in RGB channel order before calling.
- (UIImage *)UIImageFromIplImage:(IplImage *)image {
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();

    // Copy the pixel data so the CGImage outlives the IplImage.
    NSData *data = [NSData dataWithBytes:image->imageData length:image->imageSize];
    CGDataProviderRef provider =
        CGDataProviderCreateWithCFData((__bridge CFDataRef)data);

    CGImageRef imageRef = CGImageCreate(image->width,
                                        image->height,
                                        image->depth,                     // bits per component (8)
                                        image->depth * image->nChannels,  // bits per pixel
                                        image->widthStep,
                                        colorSpace,
                                        kCGImageAlphaNone | kCGBitmapByteOrderDefault,
                                        provider,
                                        NULL,
                                        false,
                                        kCGRenderingIntentDefault);

    UIImage *ret = [UIImage imageWithCGImage:imageRef];
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);
    return ret;
}

#pragma mark - custom method

// Otsu's method: picks the gray-level threshold that maximizes the
// between-class variance of the image histogram.
// pGrayImg must point to iWidth*iHeight contiguous 8-bit gray pixels.
// Returns the threshold in [0,255], or -1 on invalid arguments.
int Otsu(unsigned char *pGrayImg, int iWidth, int iHeight) {
    if ((pGrayImg == 0) || (iWidth <= 0) || (iHeight <= 0)) return -1;

    int ihist[256];
    int thresholdValue = 0;  // best threshold found so far
    int n, n1, n2;
    double m1, m2, sum, csum, fmax, sb;
    int i, j, k;

    memset(ihist, 0, sizeof(ihist));
    n = iHeight * iWidth;
    sum = csum = 0.0;
    fmax = -1.0;
    n1 = 0;

    // Build the 256-bin gray-level histogram.
    for (i = 0; i < iHeight; i++) {
        for (j = 0; j < iWidth; j++) {
            ihist[*pGrayImg]++;
            pGrayImg++;
        }
    }
    pGrayImg -= n;  // rewind to the start of the buffer

    // Total weighted sum of all gray levels.
    for (k = 0; k <= 255; k++) {
        sum += (double)k * (double)ihist[k];
    }

    // Sweep every candidate threshold k, tracking the one that maximizes
    // the between-class variance sb = n1 * n2 * (m1 - m2)^2.
    for (k = 0; k <= 255; k++) {
        n1 += ihist[k];            // pixels at or below the candidate
        if (n1 == 0) continue;     // class 1 still empty
        n2 = n - n1;               // pixels above the candidate
        if (n2 == 0) break;        // class 2 empty; no more candidates
        csum += (double)k * ihist[k];
        m1 = csum / n1;            // mean gray level of class 1
        m2 = (sum - csum) / n2;    // mean gray level of class 2
        sb = (double)n1 * (double)n2 * (m1 - m2) * (m1 - m2);
        if (sb > fmax) {
            fmax = sb;
            thresholdValue = k;
        }
    }
    return (thresholdValue);
}

// Returns a grayscale copy of the source image.
// Pipeline: UIImage -> cv::Mat (RGBA) -> cvtColor gray -> UIImage.
- (UIImage *)Grayimage:(UIImage *)srcimage {
    cv::Mat matImage = [self cvMatFromUIImage:srcimage];
    cv::Mat matGrey;
    // FIX: cvMatFromUIImage produces RGBA data, so the conversion code must
    // be CV_RGBA2GRAY. The original used CV_BGR2GRAY, which applies the R
    // and B luminance weights to the wrong channels.
    cv::cvtColor(matImage, matGrey, CV_RGBA2GRAY);
    return [self UIImageFromCVMat:matGrey];
}

// Returns a binarized (black/white) copy of the source image using a
// threshold computed with Otsu's method.
- (UIImage *)Erzhiimage:(UIImage *)srcimage {
    cv::Mat matImage = [self cvMatFromUIImage:srcimage];
    cv::Mat matGrey;
    // FIX: RGBA source, so CV_RGBA2GRAY (see -Grayimage:).
    cv::cvtColor(matImage, matGrey, CV_RGBA2GRAY);

    // cvtColor output is freshly allocated and contiguous, so the raw
    // buffer can be handed to Otsu() directly — no IplImage detour needed.
    int threshold = Otsu(matGrey.data, matGrey.cols, matGrey.rows);
    printf("閾值:%d\n", threshold);

    cv::Mat matBinary;
    cv::threshold(matGrey, matBinary, threshold, 255, cv::THRESH_BINARY);
    return [self UIImageFromCVMat:matBinary];
}

@end
四:可能問題:
1.出現'list' file not found: 檢查類名是否改爲.mm了!還不行的話,在Build Phases 中加入庫:libc++.dylib 試試。
2.arm64不支持的問題:在Building settings裏Build Active Architecture Only改成No,而後下面Valid Architectures把arm64刪了。
五:樣例參考:有兩個很好的例子,一個是二值,一個是圖像匹配。
1.二值 https://github.com/zltqzj/ios_opencv_divide
2.圖像匹配 https://github.com/jimple/OpenCVSample