Histograms can be used to describe many different parameters and quantities, such as the colour distribution of an object, an object's edge-gradient template, or the probability distribution of the current hypothesis about a target's position.
A histogram is a way of summarising data: the values are counted into a series of predefined bins, producing a chart of how the data are distributed.
For example, given a one-dimensional array with values from 0 to 255, we can use a bin width of 20 and count how many values fall in 0-20, how many in 20-40, and so on. Plotting the bins on the x-axis and the counts on the y-axis gives a histogram of the data. Likewise, for a grey-scale image whose values are also 0-255, the same procedure yields a grey-level histogram (in fact, the first step of the histogram equalisation discussed earlier is exactly this).
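To make the idea concrete, here is a minimal sketch (not from the original post; the sample values are made up) that counts a 0-255 array into bins of width 20, which is essentially what calcHist will do for us later:

#include <cstdio>
#include <vector>

int main()
{
    // Hypothetical sample data: values in the range 0-255.
    std::vector<int> data = {3, 17, 25, 25, 40, 99, 180, 200, 200, 255};

    const int binWidth = 20;
    const int binCount = 255 / binWidth + 1;   // 13 bins: 0-19, 20-39, ..., 240-259
    std::vector<int> hist(binCount, 0);

    for (int v : data)
        hist[v / binWidth]++;                  // each value falls into exactly one bin

    for (int b = 0; b < binCount; b++)
        printf("bin %2d [%3d-%3d): %d\n", b, b * binWidth, (b + 1) * binWidth, hist[b]);
    return 0;
}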
An image histogram describes the brightness distribution of a digital image: it records how many pixels have each brightness value. In computer vision, image histograms are often used, for example, as the basis of image binarisation (thresholding).
To summarise, two points: 1. for an image, the histogram is a graphical representation of the distribution of pixel intensities; 2. it counts the number of pixels at each intensity value.
Histograms are not limited to pixel grey levels; they can summarise any image feature, such as gradients or orientations.
Terminology:
1. dims — the number of features being counted; if only grey level is counted, dims = 1.
2. bin — the number of sub-intervals in each feature space, also called bars or class intervals.
3. range — the range of values of each feature space; for the grey-level feature space, the range is 0-255.
1. Histogram calculation
API: void calcHist(const Mat* pointer to the source images, int number of source images, const int* indices of the channels to be counted, InputArray optional operation mask, OutputArray output histogram, int number of histogram dimensions to compute, const int* array of histogram sizes in each dimension, const float** pointer to the arrays giving the value range of each dimension, bool whether the histogram is uniform, bool accumulation flag)
Note: every source image must have the same depth and size. If the operation mask is not noArray(), it must be 8-bit and the same size as each source image; only elements whose coordinates correspond to non-zero mask locations are counted. The uniform flag defaults to true (uniform bins), and the accumulation flag defaults to false; when set, it allows a histogram to be accumulated from several arrays, or to be updated over time.
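As a minimal sketch of how the parameters above map onto a call (the file path is just a placeholder), a single-channel grey-level histogram can be computed like this:

#include <cstdio>
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat gray = imread("lena.jpg", IMREAD_GRAYSCALE);   // hypothetical input image
    if (gray.empty()) return -1;

    int channels[] = {0};              // a grey image only has channel 0
    int histSize[] = {256};            // 256 bins, one per grey level
    float grayRange[] = {0, 256};      // upper bound is exclusive
    const float* ranges[] = {grayRange};

    MatND hist;
    calcHist(&gray, 1, channels, Mat(),  // one image, no mask
             hist, 1, histSize, ranges,  // 1-D histogram
             true, false);               // uniform bins, no accumulation

    printf("pixels with grey level 128: %.0f\n", hist.at<float>(128));
    return 0;
}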
API: void minMaxLoc(InputArray input array, double* pointer receiving the minimum value, double* pointer receiving the maximum value, Point* location of the minimum element, Point* location of the maximum element, InputArray optional mask selecting a sub-array)
Note: MatND is the data type used to store a histogram.
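A minimal sketch of how minMaxLoc reads the extremes of a histogram; the bin counts here are made-up numbers standing in for a real calcHist result:

#include <cstdio>
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    // A tiny stand-in for a 1-D histogram: 8 bins with made-up counts.
    float counts[8] = {3, 0, 12, 7, 42, 9, 1, 5};
    Mat hist(8, 1, CV_32F, counts);

    double minVal = 0, maxVal = 0;
    Point minLoc, maxLoc;
    minMaxLoc(hist, &minVal, &maxVal, &minLoc, &maxLoc);

    // For a column vector the bin index is the y coordinate of the returned location.
    printf("emptiest bin: %d (count %.0f), fullest bin: %d (count %.0f)\n",
           minLoc.y, minVal, maxLoc.y, maxVal);
    return 0;
}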
Described in the abstract this may not mean much, so let's look at some examples of how histograms are used.
// Compute a two-dimensional hue-saturation (H-S) histogram
#include <opencv2/opencv.hpp>
using namespace cv;

int main(int argc, char* argv[])
{
    Mat srcImagergb, srcImagehsv;
    srcImagergb = imread("F:\\opencv\\OpenCVImage\\hsvHist.jpg");
    cvtColor(srcImagergb, srcImagehsv, COLOR_BGR2HSV);   // imread returns BGR, so convert BGR -> HSV

    int hueBinNumber = 30;          // number of hue bins
    int saturationBinNumber = 30;   // number of saturation bins
    int histSize[2] = {hueBinNumber, saturationBinNumber};
    float hueRanges[2] = {0, 180};          // hue value range
    float saturationRanges[2] = {0, 256};   // saturation value range
    const float* ranges[2] = {hueRanges, saturationRanges};
    MatND distHist;
    int channels[2] = {0, 1};       // hue and saturation channels

    calcHist(&srcImagehsv,   // input array
             1,              // number of input images
             channels,       // channel indices 0 and 1
             Mat(),          // empty mask
             distHist,       // output histogram
             2,              // histogram dimensionality
             histSize,       // histogram size in each dimension
             ranges,         // value range of each dimension
             true,           // uniform histogram
             false);         // do not accumulate, the histogram is cleared first

    // Parameters for drawing the histogram
    double maxValue = 0;
    minMaxLoc(distHist, 0, &maxValue, 0, 0);
    int scale = 10;
    Mat histImage = Mat(saturationBinNumber * scale, hueBinNumber * scale, CV_8UC3);
    for (int i = 0; i < hueBinNumber; i++)
    {
        for (int j = 0; j < saturationBinNumber; j++)
        {
            float binValue = distHist.at<float>(i, j);            // bin count
            int intensity = cvRound(binValue * 255 / maxValue);   // brightness of the cell
            rectangle(histImage,
                      Point(i * scale, j * scale),
                      Point((i + 1) * scale - 1, (j + 1) * scale - 1),
                      Scalar::all(intensity), FILLED);
        }
    }
    imshow("src image", srcImagehsv);
    imshow("hist image", histImage);
    moveWindow("src image", 0, 0);
    moveWindow("hist image", srcImagehsv.cols, 0);
    waitKey(0);
    return 0;
}

// One-dimensional grey-level histogram of an image
int main(int argc, char* argv[])
{
    Mat srcImage;
    srcImage = imread("F:\\opencv\\OpenCVImage\\hist01.jpg");
    MatND dstHist;
    int dims = 1;
    float hranges[] = {0, 256};          // upper bound is exclusive
    const float* ranges[] = {hranges};
    int size = 256;
    int channel = 0;                     // channel 0 only (the blue channel of a BGR image)
    calcHist(&srcImage, 1, &channel, Mat(), dstHist, dims, &size, ranges);

    int scale = 1;
    double minValue = 0;
    double maxValue = 0;
    minMaxLoc(dstHist, &minValue, &maxValue, 0, 0);
    Mat histImage = Mat(size * scale, size, CV_8U, Scalar(0));
    int hpt = saturate_cast<int>(0.9 * size);
    for (int i = 0; i < 256; i++)
    {
        float binValue = dstHist.at<float>(i);
        int realvalue = saturate_cast<int>(binValue * hpt / maxValue);
        rectangle(histImage,
                  Point(i * scale, size - 1),
                  Point((i + 1) * scale - 1, size - realvalue),
                  Scalar(255));
    }
    imshow("src image", srcImage);
    imshow("hist image", histImage);
    moveWindow("src image", 0, 0);
    moveWindow("hist image", srcImage.cols, 0);
    waitKey(0);
    return 0;
}
// Display the histograms of the R, G and B components of an image
// The main difference from the previous program is that all three channels
// are extracted here, whereas the previous one only used channel 0 (blue)
#include <opencv2/opencv.hpp>
using namespace cv;

int main(int argc, char* argv[])
{
    Mat srcImage;
    srcImage = imread("F:\\opencv\\OpenCVImage\\histRgb.jpg");
    MatND dstHistr, dstHistg, dstHistb;
    int dims = 1;
    float hranges[] = {0, 256};
    const float* ranges[] = {hranges};
    int size = 256;
    int channel[] = {0};
    calcHist(&srcImage, 1, channel, Mat(), dstHistb, dims, &size, ranges);   // blue
    channel[0] = 1;
    calcHist(&srcImage, 1, channel, Mat(), dstHistg, dims, &size, ranges);   // green
    channel[0] = 2;
    calcHist(&srcImage, 1, channel, Mat(), dstHistr, dims, &size, ranges);   // red

    int scale = 1;
    double minValueb = 0, maxValueb = 0;
    double minValueg = 0, maxValueg = 0;
    double minValuer = 0, maxValuer = 0;
    minMaxLoc(dstHistb, &minValueb, &maxValueb, 0, 0);
    minMaxLoc(dstHistg, &minValueg, &maxValueg, 0, 0);
    minMaxLoc(dstHistr, &minValuer, &maxValuer, 0, 0);

    Mat histImage = Mat(size * scale, size * 3, CV_8UC3, Scalar::all(0));
    int hpt = saturate_cast<int>(0.9 * size);
    for (int i = 0; i < 256; i++)
    {
        float binValue = dstHistb.at<float>(i);   // bin count
        int realvalue = saturate_cast<int>(binValue * hpt / maxValueb);
        rectangle(histImage,
                  Point(i * scale, size - 1),
                  Point((i + 1) * scale - 1, size - realvalue),
                  Scalar(255, 0, 0));
    }
    for (int i = 0; i < 256; i++)
    {
        float binValue = dstHistg.at<float>(i);
        int realvalue = saturate_cast<int>(binValue * hpt / maxValueg);
        rectangle(histImage,
                  Point(i * scale + size, size - 1),
                  Point((i + 1) * scale - 1 + size, size - realvalue),
                  Scalar(0, 255, 0));
    }
    for (int i = 0; i < 256; i++)
    {
        float binValue = dstHistr.at<float>(i);
        int realvalue = saturate_cast<int>(binValue * hpt / maxValuer);
        rectangle(histImage,
                  Point(i * scale + size * 2, size - 1),
                  Point((i + 1) * scale - 1 + size * 2, size - realvalue),
                  Scalar(0, 0, 255));
    }
    imshow("src image", srcImage);
    imshow("hist image", histImage);
    moveWindow("src image", 0, 0);
    moveWindow("hist image", srcImage.cols, 0);
    waitKey(0);
    return 0;
}
2. Histogram comparison
Although a histogram is only a statistic, we sometimes need to measure how similar two histograms are and use that as part of a decision; this is what histogram comparison is for.
API: double compareHist(source histogram 1, source histogram 2, int comparison method).
Note: the return value of this API is the comparison result. There are four comparison methods: CV_COMP_CHISQR (chi-square; smaller values mean a better match), CV_COMP_CORREL (correlation; larger values mean a better match), CV_COMP_INTERSECT (histogram intersection; larger values mean a better match), and CV_COMP_BHATTACHARYYA (Bhattacharyya distance; smaller values mean a better match).
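As a minimal sketch (the file names are placeholders, and HISTCMP_CORREL / HISTCMP_BHATTACHARYYA are the OpenCV 3+ spellings of the CV_COMP_* constants above), two grey-level histograms can be compared like this; normalising both first keeps the scores comparable across images of different sizes:

#include <cstdio>
#include <opencv2/opencv.hpp>
using namespace cv;

static MatND grayHist(const Mat& img)
{
    int channels[] = {0};
    int histSize[] = {256};
    float range[] = {0, 256};
    const float* ranges[] = {range};
    MatND hist;
    calcHist(&img, 1, channels, Mat(), hist, 1, histSize, ranges);
    normalize(hist, hist, 0, 1, NORM_MINMAX, -1, Mat());   // scale bins into [0,1]
    return hist;
}

int main()
{
    Mat a = imread("a.jpg", IMREAD_GRAYSCALE);   // hypothetical inputs
    Mat b = imread("b.jpg", IMREAD_GRAYSCALE);
    if (a.empty() || b.empty()) return -1;

    MatND ha = grayHist(a), hb = grayHist(b);
    double corr = compareHist(ha, hb, HISTCMP_CORREL);          // 1 = identical shape
    double bhat = compareHist(ha, hb, HISTCMP_BHATTACHARYYA);   // 0 = identical shape
    printf("correlation = %.3f, bhattacharyya = %.3f\n", corr, bhat);
    return 0;
}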
A fuller example of histogram comparison follows:
#include <cstdio>
#include <opencv2/opencv.hpp>
using namespace cv;

int main(int argc, char* argv[])
{
    Mat src1Image, src1HalfImage, src2Image, src3Image;
    Mat src1HsvImage, src1HalfHsvImage, src2HsvImage, src3HsvImage;
    src1Image = imread("F:\\opencv\\OpenCVImage\\compareHist01.jpg");
    src2Image = imread("F:\\opencv\\OpenCVImage\\compareHist02.jpg");
    src3Image = imread("F:\\opencv\\OpenCVImage\\compareHist03.jpg");
    // Lower half of the first image, used as a fourth test histogram
    src1HalfImage = Mat(src1Image,
                        Range(src1Image.rows / 2, src1Image.rows - 1),
                        Range(0, src1Image.cols - 1));
    cvtColor(src1Image, src1HsvImage, COLOR_BGR2HSV);
    cvtColor(src2Image, src2HsvImage, COLOR_BGR2HSV);
    cvtColor(src3Image, src3HsvImage, COLOR_BGR2HSV);
    cvtColor(src1HalfImage, src1HalfHsvImage, COLOR_BGR2HSV);

    int hbins = 50, sbins = 60;
    int histSize[] = {hbins, sbins};
    int channels[] = {0, 1};
    float hRange[] = {0, 180};   // hue range
    float sRange[] = {0, 256};   // saturation range
    const float* ranges[] = {hRange, sRange};
    MatND src1Hist, src2Hist, src3Hist, srcHalfHist;

    calcHist(&src1HsvImage, 1, channels, Mat(), src1Hist, 2, histSize, ranges, true, false);
    normalize(src1Hist, src1Hist, 0, 1, NORM_MINMAX, -1, Mat());
    calcHist(&src2HsvImage, 1, channels, Mat(), src2Hist, 2, histSize, ranges, true, false);
    normalize(src2Hist, src2Hist, 0, 1, NORM_MINMAX, -1, Mat());
    calcHist(&src3HsvImage, 1, channels, Mat(), src3Hist, 2, histSize, ranges, true, false);
    normalize(src3Hist, src3Hist, 0, 1, NORM_MINMAX, -1, Mat());
    calcHist(&src1HalfHsvImage, 1, channels, Mat(), srcHalfHist, 2, histSize, ranges, true, false);
    normalize(srcHalfHist, srcHalfHist, 0, 1, NORM_MINMAX, -1, Mat());

    imshow("src1 image", src1Image);
    imshow("src2 image", src2Image);
    imshow("src3 image", src3Image);
    imshow("src1 half image", src1HalfImage);
    moveWindow("src1 image", 0, 0);
    moveWindow("src2 image", src1Image.cols, 0);
    moveWindow("src3 image", src2Image.cols + src1Image.cols, 0);
    moveWindow("src1 half image", src1Image.cols + src2Image.cols + src3Image.cols, 0);

    for (int i = 0; i < 4; i++)
    {
        int compareMethod = i;
        double compareResultA = compareHist(src1Hist, src1Hist, i);
        double compareResultB = compareHist(src1Hist, src2Hist, i);
        double compareResultC = compareHist(src1Hist, src3Hist, i);
        double compareResultD = compareHist(src1Hist, srcHalfHist, i);
        printf("comparison results:\n"
               "compareResultA = %.3f\n"
               "compareResultB = %.3f\n"
               "compareResultC = %.3f\n"
               "compareResultD = %.3f\n",
               compareResultA, compareResultB, compareResultC, compareResultD);
        printf("comparison method: %d\n", compareMethod);
    }
    waitKey(0);
    return 0;
}
3. Back projection
Back projection is a technique that first builds a histogram model of some feature and then uses that model to look for the feature in an image.
The brightness value stored at each position of the back projection represents the probability that the corresponding pixel of the test image belongs to the feature: locations with the same brightness have the same probability of belonging to the feature, and brighter locations are more likely to be part of it. Shading between the interior and the edges of an object reduces the accuracy of the detection.
The purpose of back projection is to find, in an input image, the points or regions that best match a given model image, i.e. to locate where the template appears in the input image.
The result of the projection is a histogram-comparison value for each pixel of the input image; it can be viewed as a single-channel floating-point image, or as a two-dimensional array of probabilities.
API: void calcBackProject(const Mat* pointer to the input image array, int number of images, const int* indices of the channels to use, InputArray input histogram, OutputArray output back-projection array, const float** arrays giving the bounds of each histogram dimension, double scale factor, bool whether the histogram is uniform).
Note: this function computes the back projection.
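A minimal sketch of the idea (the file name and the ROI rectangle are placeholders): build a hue histogram from a sample patch, then back-project it over the whole image so that pixels whose hue is common in the patch come out bright:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat src = imread("scene.jpg");                 // hypothetical input image
    if (src.empty()) return -1;
    Mat hsv;
    cvtColor(src, hsv, COLOR_BGR2HSV);

    // Hue histogram of a hand-picked sample patch (the "model" of the feature)
    Mat patch = hsv(Rect(10, 10, 50, 50));         // placeholder region of interest
    int channels[] = {0};
    int histSize[] = {30};
    float hueRange[] = {0, 180};
    const float* ranges[] = {hueRange};
    MatND hueHist;
    calcHist(&patch, 1, channels, Mat(), hueHist, 1, histSize, ranges, true, false);
    normalize(hueHist, hueHist, 0, 255, NORM_MINMAX, -1, Mat());

    // Back-project the model over the full image: bright pixels are likely part of the feature
    MatND backproj;
    calcBackProject(&hsv, 1, channels, hueHist, backproj, ranges, 1, true);

    imshow("back projection", backproj);
    waitKey(0);
    return 0;
}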
When computing the back projection of a complex image we sometimes need to extract one channel of the image and use it on its own. This is channel copying: copying a given channel of the input image into a given channel of the output image.
API: void mixChannels(const Mat* input image array, size_t number of input images, Mat* output image array, size_t number of output images, const int* array of index pairs specifying which channels are copied where, size_t number of index pairs).
Note: this function is a more general, higher-level version of split and merge.
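A minimal sketch of pulling just the hue channel out of an HSV image with mixChannels (the same pattern the example below uses; the file name is a placeholder). The {0, 0} pair means "copy channel 0 of the source into channel 0 of the destination":

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat bgr = imread("scene.jpg");        // hypothetical input image
    if (bgr.empty()) return -1;
    Mat hsv;
    cvtColor(bgr, hsv, COLOR_BGR2HSV);

    // The destination must be pre-allocated with the desired size, depth and channel count
    Mat hue(hsv.size(), CV_8UC1);
    int fromTo[] = {0, 0};                // source channel 0 (hue) -> destination channel 0
    mixChannels(&hsv, 1, &hue, 1, fromTo, 1);

    imshow("hue channel", hue);
    waitKey(0);
    return 0;
}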
An example using the two APIs above follows:
// Back projection demo
#include <cstdio>
#include <opencv2/opencv.hpp>
using namespace cv;

#define WINDOW_NAME1 "src image"   // macro for the main window title

//-----------------------------------[Global variables]-----------------------------------
Mat g_srcImage;
Mat g_hsvImage;
Mat g_hueImage;
int g_bins = 30;   // number of histogram bins

//-----------------------------------[Function declarations]------------------------------
void on_BinChange(int, void*);

//--------------------------------------[main()]------------------------------------------
int main()
{
    // [1] Read the source image and convert it to HSV
    g_srcImage = imread("F:\\opencv\\OpenCVImage\\backProject.jpg", 1);
    if (!g_srcImage.data)
    {
        printf("Could not read the image, make sure the file passed to imread exists!\n");
        return -1;
    }
    cvtColor(g_srcImage, g_hsvImage, COLOR_BGR2HSV);

    // [2] Separate the hue channel
    g_hueImage.create(g_hsvImage.size(), g_hsvImage.depth());
    int ch[] = {0, 0};
    mixChannels(&g_hsvImage, 1, &g_hueImage, 1, ch, 1);

    // [3] Create a trackbar to choose the number of bins
    namedWindow(WINDOW_NAME1, WINDOW_AUTOSIZE);
    createTrackbar("bin size ", WINDOW_NAME1, &g_bins, 180, on_BinChange);
    on_BinChange(0, 0);   // run once to initialise

    // [4] Show the source image
    imshow(WINDOW_NAME1, g_srcImage);

    waitKey(0);
    return 0;
}

//-----------------------------------[on_BinChange()]--------------------------------------
// Callback invoked when the trackbar moves
void on_BinChange(int, void*)
{
    // [1] Prepare parameters
    MatND hist;
    int histSize = MAX(g_bins, 2);
    float hue_range[] = {0, 180};
    const float* ranges = {hue_range};

    // [2] Compute and normalise the hue histogram
    calcHist(&g_hueImage, 1, 0, Mat(), hist, 1, &histSize, &ranges, true, false);
    normalize(hist, hist, 0, 255, NORM_MINMAX, -1, Mat());

    // [3] Compute the back projection
    MatND backproj;
    calcBackProject(&g_hueImage, 1, 0, hist, backproj, &ranges, 1, true);

    // [4] Show the back projection
    imshow("back project", backproj);

    // [5] Parameters for drawing the histogram
    int w = 400;
    int h = 400;
    int bin_w = cvRound((double)w / histSize);
    Mat histImg = Mat::zeros(h, w, CV_8UC3);

    // [6] Draw the histogram
    for (int i = 0; i < g_bins; i++)
    {
        rectangle(histImg,
                  Point(i * bin_w, h),
                  Point((i + 1) * bin_w, h - cvRound(hist.at<float>(i) * h / 255.0)),
                  Scalar(100, 123, 255), -1);
    }

    // [7] Show the histogram window
    imshow("hist", histImg);
}
4. Template matching
Template matching is the technique of finding the part of an image that is most similar to a template. It is not a histogram-based method: the template is slid across the input image and compared against each image patch in turn.
API: void matchTemplate(input image, template image, result map, matching method)
Note: the input image must be 8-bit or 32-bit floating point, and the template must have the same type as the input; its size is usually different, but it must not be larger than the input. The result map is always a single-channel 32-bit floating-point image of size (src.cols - templ.cols + 1) x (src.rows - templ.rows + 1).
The following methods are available: TM_SQDIFF (squared difference; 0 is a perfect match and larger values are worse), TM_SQDIFF_NORMED (normalised squared difference; 0 is the best match and 1 the worst), TM_CCORR (cross-correlation; 0 is the worst result and larger values are better), TM_CCORR_NORMED (normalised cross-correlation; 1 is a perfect match and 0 the worst), TM_CCOEFF (correlation coefficient; larger values are better), and TM_CCOEFF_NORMED (normalised correlation coefficient; 1 is a perfect match).
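A minimal sketch of locating a template with matchTemplate and minMaxLoc (the file names are placeholders); note that for the two SQDIFF methods the best match is the minimum of the result map, while for all other methods it is the maximum:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat scene = imread("scene.jpg");      // hypothetical input image
    Mat templ = imread("patch.jpg");      // hypothetical template, smaller than the scene
    if (scene.empty() || templ.empty()) return -1;

    // Result map has size (W - w + 1) x (H - h + 1), single channel, 32-bit float
    Mat result;
    matchTemplate(scene, templ, result, TM_CCOEFF_NORMED);

    double minVal, maxVal;
    Point minLoc, maxLoc;
    minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc);

    // For TM_CCOEFF_NORMED the best match is the maximum of the result map
    rectangle(scene, maxLoc,
              Point(maxLoc.x + templ.cols, maxLoc.y + templ.rows),
              Scalar(0, 0, 255), 2);
    imshow("best match", scene);
    waitKey(0);
    return 0;
}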
When matching, try the different methods on your images and see which gives the best result. A fuller example follows:
// Template matching
// Requires a source image and a template image
#include <cstdio>
#include <opencv2/opencv.hpp>
using namespace cv;

Mat g_srcImage, g_templeImage;
const int g_matchMethodMax = 5;
int g_matchMethodValue;
void onMatchMethod(int pos, void* userData);

int main(int argc, char* argv[])
{
    g_srcImage = imread("F:\\opencv\\OpenCVImage\\match.jpg");
    g_templeImage = imread("F:\\opencv\\OpenCVImage\\temple.jpg");
    namedWindow("src image");
    namedWindow("temp image");
    namedWindow("match image");
    g_matchMethodValue = 0;
    createTrackbar("match method", "src image",
                   &g_matchMethodValue, g_matchMethodMax, onMatchMethod, 0);
    onMatchMethod(0, 0);
    //imshow("src image", g_srcImage);
    imshow("temp image", g_templeImage);
    moveWindow("src image", 0, 0);
    moveWindow("temp image", g_srcImage.cols, 0);
    moveWindow("match image", g_srcImage.cols + g_templeImage.cols, 0);
    waitKey(0);
    return 0;
}

void onMatchMethod(int pos, void* userData)
{
    Mat srcImage;
    g_srcImage.copyTo(srcImage);
    Mat tempImage;
    g_templeImage.copyTo(tempImage);
    // Result map size is (source - template + 1) in each dimension
    Mat resultImage = Mat(srcImage.rows - tempImage.rows + 1,
                          srcImage.cols - tempImage.cols + 1,
                          CV_32FC1);
    matchTemplate(srcImage, tempImage, resultImage, g_matchMethodValue);
    normalize(resultImage, resultImage, 0, 1, NORM_MINMAX, -1, Mat());

    double min_value, max_value;
    Point minLocation, maxLocation;
    Point matchLocation;
    minMaxLoc(resultImage, &min_value, &max_value, &minLocation, &maxLocation, Mat());
    // For the squared-difference methods the best match is the minimum,
    // for all other methods it is the maximum
    if (pos == TM_SQDIFF || pos == TM_SQDIFF_NORMED)
    {
        matchLocation = minLocation;
        printf("match score: %.3f\n", min_value);
    }
    else
    {
        matchLocation = maxLocation;
        printf("match score: %.3f\n", max_value);
    }
    rectangle(srcImage, matchLocation,
              Point(matchLocation.x + tempImage.cols, matchLocation.y + tempImage.rows),
              Scalar(0, 0, 255), 2, 8, 0);
    rectangle(resultImage, matchLocation,
              Point(matchLocation.x + tempImage.cols, matchLocation.y + tempImage.rows),
              Scalar(0, 0, 255), 2, 8, 0);
    imshow("src image", srcImage);
    imshow("match image", resultImage);
}