OPENCV圖像輪廓檢測

前面在圖像轉換的時候學到canny算子,能夠檢測出圖像的輪廓信息,可是,該算子檢測到的輪廓信息還須要咱們手動的用眼睛去識別,而實際工程應用中,咱們須要獲得輪廓的具體數學信息,這就涉及到今天的主題,圖像輪廓檢測.算法

         一.圖像輪廓檢測api

         在opencv中,輪廓對應着一系列的點的集合,opencv提供了一個函數,用來得到這些點的集合數組

         API:void findContours(輸入圖像,輸出輪廓點集,輸出向量,int 輪廓檢索模式,int 輪廓近似方法,Point 輪廓點的可選偏移量)

         注:1.輸入圖像,是單通道八位閾值化圖像,也就是對應canny檢測以後的圖像,若是不是閾值化圖像,算法會將圖像中不爲0的點當成1,0點當成0處理,咱們能夠經過canny threshold adaptiveThreshold來處理,函數

            2.該函數在檢測輪廓的時候會修改源圖像的數據,因此最好不要拿源圖像直接檢測輪廓,最好是用拷貝測試

            3.輸出點集的形式爲vector<vector<Point>>contours,每一個向量包含有多個向量,包含的向量一個就是一條輪廓,而包含的向量是由一個個點構成的.編碼

                  4.輸出向量包含了圖像輪廓的拓撲信息,好比這個輪廓的前一個輪廓編號,後一個輪廓編號,父輪廓編號以及子輪廓編號,形式爲vector<Vec4i>h,h[0]爲後一個輪廓編號 h[1]爲前一個輪廓編號 h[2]父輪廓編號 h[3]內嵌輪廓編號

                  5.輪廓的檢索模式包含有以下幾種選擇RETR_EXTERNAL只檢測最外圍的輪廓  RETR_LIST提取全部的輪廓,不創建上下等級關係,只有兄弟等級關係  RETR_CCOMP提取全部輪廓,創建爲雙層結構 RETR_TREE提取全部輪廓,創建網狀結構scala

                  6.輪廓的近似方法有如下取值 CHAIN_APPROX_NONE獲取輪廓的每個像素,像素的最大間距不超過1 CHAIN_APPROX_SIMPLE壓縮水平垂直對角線的元素,只保留該方向的終點座標(也就是說一條中垂線a-b,中間的點被忽略了) CHAIN_APPROX_TC89_LI使用TEH_CHAIN逼近算法中的LI算法  CHAIN_APPROX_TC89_KCOS使用TEH_CHAIN逼近算法中的KCOS算法code

                  7.可選偏移量,對ROI區域中得到的輪廓要在整個圖像中分析的時候,該參數能夠派上用場

         二.圖像輪廓繪製

         該函數能夠很方便的繪製出咱們找到的輪廓點集和拓撲結構構成的輪廓圖.

         API:void drawContours(源圖像,輪廓點集,int 繪製指示,scalar 輪廓顏色,int 輪廓粗細,int 輪廓線形,inputarray 輪廓的拓撲結構信息,int 輪廓的最大繪製等級,int 可選的輪廓偏移參數)

         注:1.若是繪製指示爲負值,則繪製全部輪廓,輪廓粗細默認值爲1,爲負值的化,繪製在輪廓的內部進行,使用filled,填充輪廓,線條類型默認爲8,LINE_AA將繪製抗鋸齒的線型,輪廓繪製等級,默認值是INT_MAX,指示最多能夠繪製幾層輪廓,=.

         輪廓檢測的基本步驟爲1.圖像轉換爲灰度圖像,2.圖像的濾波,降噪,3.圖像的canny二值化或者其餘二值化方式,4.尋找輪廓findContours

代碼例子以下

//Edge-contour detection demo.
//First blur the image, then run the Canny algorithm to obtain a binary edge image.
//A mean (box) filter is used for smoothing; its aperture size is adjustable.
//For Canny, the lower/upper thresholds and the Sobel aperture (3, 5, 7) are adjustable.
Mat srcImage,blurImage,grayBlurImage,cannyImage,contourImage;
vector<vector<Point>>g_vContours;
vector<Vec4i>g_vHierarchy;
RNG g_rng(12345);

const int g_BlurMax = 100;
int g_BlurValue;
void onBlurTrackBar(int pos,void* userData);

const int g_sobelSizeMax = 2;//Sobel aperture slider range (maps to 3/5/7)
int g_sobelValue;

const int g_lowThresholdMax = 80;//maximum value for the Canny low-threshold slider
int g_lowThresholdValue;
int g_upThresholdValue;

void onTrackBarSobelSize(int pos,void* userData);
void onTrackBarLowThresholdSize(int pos,void* userData);


// Entry point: loads the test image, builds the trackbar UI, and runs the
// contour pipeline once so every window has content before the event loop.
int main(int argc,char* argv[])
{
    srcImage = imread("F:\\opencv\\OpenCVImage\\findContours.jpg");
    // Guard against a missing/unreadable file: every later cv:: call would
    // abort on an empty Mat.
    if (srcImage.empty()) {
        printf("failed to load findContours.jpg\n");
        return -1;
    }

    g_BlurValue = 4;
    g_sobelValue = 1;
    g_lowThresholdValue = 80;
    g_upThresholdValue = 240;
    namedWindow("canny Image");
    // BUG FIX: the callbacks display their result in "contour image"; the
    // original code created an unused window named "contours Image" instead.
    namedWindow("contour image");
    createTrackbar("blur size value ", "canny Image", &g_BlurValue, g_BlurMax,onBlurTrackBar,0);
    createTrackbar("sobel size", "canny Image", &g_sobelValue, g_sobelSizeMax,onTrackBarSobelSize,0);
    createTrackbar("low threshold", "canny Image", &g_lowThresholdValue, g_lowThresholdMax,onTrackBarLowThresholdSize,0);
    onBlurTrackBar(g_BlurValue, 0);    // run the pipeline once with the defaults

    imshow("src image", srcImage);
    moveWindow("src image", 0, 0);
    moveWindow("canny Image", srcImage.cols, 0);
    moveWindow("contour image", srcImage.cols*2, 0);

    waitKey(0);
    return 0;
}

//修T改?了¢?濾?波¡§參?數ºy
void onBlurTrackBar(int pos,void* userData)
{
    int blurSize = g_BlurValue*2+1;
    blur(srcImage, blurImage, Size(blurSize,blurSize));

    int sobelValue = g_sobelValue*2 +3;
    if (g_lowThresholdValue == 0) {
        g_lowThresholdValue = 1;
    }
    int lowThresholdValue = g_lowThresholdValue;
    int upThresholdValue = lowThresholdValue*3;
    cvtColor(blurImage, grayBlurImage, CV_RGB2GRAY);
    //計?算?canny
    Canny(grayBlurImage, cannyImage, lowThresholdValue, upThresholdValue,sobelValue);

    contourImage = Mat::zeros(cannyImage.rows, cannyImage.cols, CV_8UC3);
    findContours(cannyImage, g_vContours, g_vHierarchy, RETR_TREE,CHAIN_APPROX_SIMPLE,Point(0,0));
    for(int i = 0;i<g_vHierarchy.size();i++)
    {
        Scalar color = Scalar(g_rng.uniform(0, 255),g_rng.uniform(0, 255),g_rng.uniform(0, 255));
        drawContours(contourImage, g_vContours, i, color,2,8,g_vHierarchy,0,Point(0,0));
    }

    imshow("contour image", contourImage);
    imshow("canny Image",cannyImage);
}

//修T改?了¢?sobel孔¡Á徑?參?數ºy
void onTrackBarSobelSize(int pos,void* userData)
{
    int blurSize = g_BlurValue*2+1;
    blur(srcImage, blurImage, Size(blurSize,blurSize));

    int sobelValue = g_sobelValue*2 +3;
    if (g_lowThresholdValue == 0) {
        g_lowThresholdValue = 1;
    }
    int lowThresholdValue = g_lowThresholdValue;
    int upThresholdValue = lowThresholdValue*3;
    cvtColor(blurImage, grayBlurImage, CV_RGB2GRAY);
    //計?算?canny
    Canny(grayBlurImage, cannyImage, lowThresholdValue, upThresholdValue,sobelValue);

    contourImage = Mat::zeros(cannyImage.rows, cannyImage.cols, CV_8UC3);
    findContours(cannyImage, g_vContours, g_vHierarchy, RETR_TREE,CHAIN_APPROX_SIMPLE,Point(0,0));
    for(int i = 0;i<g_vHierarchy.size();i++)
    {
        Scalar color = Scalar(g_rng.uniform(0, 255),g_rng.uniform(0, 255),g_rng.uniform(0, 255));
        drawContours(contourImage, g_vContours, i, color,2,8,g_vHierarchy,0,Point(0,0));
    }

    imshow("contour image", contourImage);
    imshow("canny Image",cannyImage);
}

//修T改?了¢?閾D值¦Ì參?數ºy
void onTrackBarLowThresholdSize(int pos,void* userData)
{
    int blurSize = g_BlurValue*2+1;
    blur(srcImage, blurImage, Size(blurSize,blurSize));

    int sobelValue = g_sobelValue*2 +3;
    if (g_lowThresholdValue == 0) {
        g_lowThresholdValue = 1;
    }
    int lowThresholdValue = g_lowThresholdValue;
    int upThresholdValue = lowThresholdValue*3;
    cvtColor(blurImage, grayBlurImage, CV_RGB2GRAY);
    //計?算?canny
    Canny(grayBlurImage, cannyImage, lowThresholdValue, upThresholdValue,sobelValue);

    contourImage = Mat::zeros(cannyImage.rows, cannyImage.cols, CV_8UC3);
    findContours(cannyImage, g_vContours, g_vHierarchy, RETR_TREE,CHAIN_APPROX_SIMPLE,Point(0,0));
    for(int i = 0;i<g_vHierarchy.size();i++)
    {
        Scalar color = Scalar(g_rng.uniform(0, 255),g_rng.uniform(0, 255),g_rng.uniform(0, 255));
        drawContours(contourImage, g_vContours, i, color,2,8,g_vHierarchy,0,Point(0,0));
    }

    imshow("contour image", contourImage);
    imshow("canny Image",cannyImage);
}

二.凸包

         凸包是指,給定一個二維平面上的點集,凸包就是將這個點集最外層的點鏈接起來的構成的凸多邊形

         計算一個物體的凸包,而後計算凸包的凹缺陷,是理解物體輪廓與形狀的有效方式,能夠用於典型的類似物體查找中.

         API:void convexHull(輸入二維點集,輸出凸包,bool 操做方向標識符,bool 返回點類型)

         注:1.操做方向標識符,是指在笛卡爾座標中,當爲true的時候,起始點到結束點順時針,不然,逆時針.

                  2返回點類型爲真時,返回凸包的各個定點,不然,返回凸包各點的指數,當輸出爲vector<point>時,這個參數被忽略.

                  3.返回的二維點集形態類是vector<point>,獲得的凸包類型,也是vector<point>hull,hull.size()是凸包的點的個數.

         通常尋找凸包,主要是先對圖像二值化,後尋找輪廓,而後尋找一條指定輪廓的凸包.

示例代碼以下

//Contour search on an image; for every contour found, compute its convex
//hull and display the final result.
//Edge-contour detection:
//first blur, then use Canny to obtain a binary edge image.
//A mean (box) filter is used for smoothing; the aperture size is adjustable.
//For Canny, the lower/upper thresholds and the Sobel aperture (3, 5, 7) are adjustable.
Mat srcImage,blurImage,grayBlurImage,cannyImage,contourImage,hullImage;
vector<vector<Point>>g_vContours;
vector<Vec4i>g_vHierarchy;
RNG g_rng(12345);

const int g_BlurMax = 100;
int g_BlurValue;
void onBlurTrackBar(int pos,void* userData);

const int g_sobelSizeMax = 2;//Sobel aperture slider range (maps to 3/5/7)
int g_sobelValue;

const int g_lowThresholdMax = 80;//maximum value for the Canny low-threshold slider
int g_lowThresholdValue;
int g_upThresholdValue;

void onTrackBarSobelSize(int pos,void* userData);
void onTrackBarLowThresholdSize(int pos,void* userData);


// Entry point for the convex-hull demo: loads the image, builds the
// trackbar UI, and runs the pipeline once.
int main(int argc,char* argv[])
{
    srcImage = imread("F:\\opencv\\OpenCVImage\\convexHull.jpg");
    // Guard against a missing/unreadable file before any processing.
    if (srcImage.empty()) {
        printf("failed to load convexHull.jpg\n");
        return -1;
    }

    g_BlurValue = 4;
    g_sobelValue = 1;
    g_lowThresholdValue = 80;
    g_upThresholdValue = 240;
    namedWindow("canny Image");
    // BUG FIX: the callbacks display into "contour image"; the original
    // created an unused "contours Image" window instead.
    namedWindow("contour image");
    namedWindow("hull image");
    createTrackbar("blur size value ", "canny Image", &g_BlurValue, g_BlurMax,onBlurTrackBar,0);
    createTrackbar("sobel size", "canny Image", &g_sobelValue, g_sobelSizeMax,onTrackBarSobelSize,0);
    createTrackbar("low threshold", "canny Image", &g_lowThresholdValue, g_lowThresholdMax,onTrackBarLowThresholdSize,0);
    onBlurTrackBar(g_BlurValue, 0);    // initial run with default settings

    moveWindow("src image", 0, 0);
    moveWindow("canny Image", srcImage.cols, srcImage.rows);
    moveWindow("contour image", srcImage.cols*2, 0);
    moveWindow("hull image", srcImage.cols*2, srcImage.rows);

    waitKey(0);
    return 0;
}

//修T改?了¢?濾?波¡§參?數ºy
void onBlurTrackBar(int pos,void* userData)
{
   imshow("src image", srcImage);
   
   int blurSize = g_BlurValue*2+1;
   blur(srcImage, blurImage, Size(blurSize,blurSize));

   int sobelValue = g_sobelValue*2 +3;
   if (g_lowThresholdValue == 0) {
       g_lowThresholdValue = 1;
   }
   int lowThresholdValue = g_lowThresholdValue;
   int upThresholdValue = lowThresholdValue*3;
   cvtColor(blurImage, grayBlurImage, CV_RGB2GRAY);
   //計?算?canny
   Canny(grayBlurImage, cannyImage, lowThresholdValue, upThresholdValue,sobelValue);

   contourImage = Mat::zeros(cannyImage.rows, cannyImage.cols, CV_8UC3);
   findContours(cannyImage, g_vContours, g_vHierarchy, RETR_TREE,CHAIN_APPROX_SIMPLE,Point(0,0));
   for(int i = 0;i<g_vHierarchy.size();i++)
   {
       Scalar color = Scalar(g_rng.uniform(0, 255),g_rng.uniform(0, 255),g_rng.uniform(0, 255));
       drawContours(contourImage, g_vContours, i, color,2,8,g_vHierarchy,0,Point(0,0));
   }
   
   vector<vector<Point>>hull(g_vContours.size());
   hullImage = Mat::zeros(cannyImage.rows, cannyImage.cols, CV_8UC3);
   for(int i = 0; i < g_vContours.size(); i++)
   {
       convexHull(Mat(g_vContours[i]), hull[i],false);
       Scalar color = Scalar(g_rng.uniform(0, 255),g_rng.uniform(0, 255),g_rng.uniform(0, 255));
       drawContours(hullImage, hull, i, color,1,8,vector<Vec4i>(),0,Point(0,0));
   }

   imshow("hull image", hullImage);
   imshow("contour image", contourImage);
   imshow("canny Image",cannyImage);
}

//修T改?了¢?sobel孔¡Á徑?參?數ºy
void onTrackBarSobelSize(int pos,void* userData)
{
   imshow("src image", srcImage);

   int blurSize = g_BlurValue*2+1;
   blur(srcImage, blurImage, Size(blurSize,blurSize));

   int sobelValue = g_sobelValue*2 +3;
   if (g_lowThresholdValue == 0) {
       g_lowThresholdValue = 1;
   }
   int lowThresholdValue = g_lowThresholdValue;
   int upThresholdValue = lowThresholdValue*3;
   cvtColor(blurImage, grayBlurImage, CV_RGB2GRAY);
   //計?算?canny
   Canny(grayBlurImage, cannyImage, lowThresholdValue, upThresholdValue,sobelValue);

   contourImage = Mat::zeros(cannyImage.rows, cannyImage.cols, CV_8UC3);
   findContours(cannyImage, g_vContours, g_vHierarchy, RETR_TREE,CHAIN_APPROX_SIMPLE,Point(0,0));
   for(int i = 0;i<g_vHierarchy.size();i++)
   {
       Scalar color = Scalar(g_rng.uniform(0, 255),g_rng.uniform(0, 255),g_rng.uniform(0, 255));
       drawContours(contourImage, g_vContours, i, color,2,8,g_vHierarchy,0,Point(0,0));
   }
   vector<vector<Point>>hull(g_vContours.size());
   hullImage = Mat::zeros(cannyImage.rows, cannyImage.cols, CV_8UC3);
   for(int i = 0; i < g_vContours.size(); i++)
   {
       convexHull(Mat(g_vContours[i]), hull[i],false);
       Scalar color = Scalar(g_rng.uniform(0, 255),g_rng.uniform(0, 255),g_rng.uniform(0, 255));
       drawContours(hullImage, hull, i, color,1,8,vector<Vec4i>(),0,Point(0,0));
   }
   
   imshow("hull image", hullImage);
   imshow("contour image", contourImage);
   imshow("canny Image",cannyImage);
}

//修T改?了¢?閾D值¦Ì參?數ºy
void onTrackBarLowThresholdSize(int pos,void* userData)
{
   imshow("src image", srcImage);

   int blurSize = g_BlurValue*2+1;
   blur(srcImage, blurImage, Size(blurSize,blurSize));

   int sobelValue = g_sobelValue*2 +3;
   if (g_lowThresholdValue == 0) {
       g_lowThresholdValue = 1;
   }
   int lowThresholdValue = g_lowThresholdValue;
   int upThresholdValue = lowThresholdValue*3;
   cvtColor(blurImage, grayBlurImage, CV_RGB2GRAY);
   //計?算?canny
   Canny(grayBlurImage, cannyImage, lowThresholdValue, upThresholdValue,sobelValue);

   contourImage = Mat::zeros(cannyImage.rows, cannyImage.cols, CV_8UC3);
   findContours(cannyImage, g_vContours, g_vHierarchy, RETR_TREE,CHAIN_APPROX_SIMPLE,Point(0,0));
   for(int i = 0;i<g_vHierarchy.size();i++)
   {
       Scalar color = Scalar(g_rng.uniform(0, 255),g_rng.uniform(0, 255),g_rng.uniform(0, 255));
       drawContours(contourImage, g_vContours, i, color,2,8,g_vHierarchy,0,Point(0,0));
   }
   vector<vector<Point>>hull(g_vContours.size());
   hullImage = Mat::zeros(cannyImage.rows, cannyImage.cols, CV_8UC3);
   for(int i = 0; i < g_vContours.size(); i++)
   {
       convexHull(Mat(g_vContours[i]), hull[i],false);
       Scalar color = Scalar(g_rng.uniform(0, 255),g_rng.uniform(0, 255),g_rng.uniform(0, 255));
       drawContours(hullImage, hull, i, color,1,8,vector<Vec4i>(),0,Point(0,0));
   }
   
   imshow("hull image", hullImage);
   imshow("contour image", contourImage);
   imshow("canny Image",cannyImage);
}

三.對於輪廓表明的二維點集的其餘處理方式

         獲取點集的外圍矩形邊界

         API:Rect boundingRect(輸入點集)

         返回點集的最小包圍矩形

         API:RotatedRect minAreaRect(輸入點集)

         尋找點集的最小包圍圓心

         API:void minEnclosingCircle(輸入點集,Point2f& 圓心,float& 半徑)

         橢圓擬合二維點集

         API:RotatedRect fitEllipse(輸入點集)

         逼近多邊形曲線

         API:void approxPolyDP(輸入二維點集,輸出多邊形逼近結果,double epsilon,bool close是否封閉)

         注:epsilon爲原始曲線和近似曲線之間的最大值

            closed爲真,則封閉,爲假,獲得的多邊形不封閉

         以上各個API使用例程以下

//First find the contours, then approximate each contour with a polygon.
//From the polygon approximation we derive the enclosing circle, the
//minimum rectangle and the upright bounding rectangle.
//Only the binarisation threshold is adjustable here.
//The maximum threshold value is 255; the minimum is variable.
Mat srcImage,srcCopyImage,srcGrayImage,srcThresholdImage,DstImage;
vector<vector<Point>>contours;
vector<Vec4i> hierarchys;
const int g_lowThresholdMax = 254;
int g_lowThresholdValue;
int g_upThresholdValue;
void onTrackBarLowThreshold(int pos,void* userData);
RNG g_rng(12345);
int main(int argc,char* argv[])
{
   srcImage = imread("F:\\opencv\\OpenCVImage\\contour.jpg");
   srcCopyImage = srcImage.clone();
   //轉Áa化¡¥RGB爲a灰¨°度¨¨圖ª?像?
   if(srcImage.channels() == 3)
   {
       cvtColor(srcImage, srcGrayImage, CV_RGB2GRAY);
   }
   else
   {
       srcGrayImage = srcImage.clone();
   }
   blur(srcGrayImage, srcGrayImage,Size(3,3));
   
   namedWindow("src image");
   namedWindow("threshold image");
   namedWindow("dst image");
   
   g_lowThresholdValue = 80;
   g_upThresholdValue = 255;
   createTrackbar("low threshold value", "threshold image", &g_lowThresholdValue, g_lowThresholdMax,onTrackBarLowThreshold,0);
   onTrackBarLowThreshold(g_lowThresholdValue, 0);
   
   imshow("src image", srcImage);
   
   moveWindow("src image", 0, 0);
   moveWindow("threshold image", srcImage.cols, 0);
   moveWindow("dst image", srcImage.cols*2, 0);
   
   waitKey(0);
   return 0;
}


void onTrackBarLowThreshold(int pos,void* userData)
{
   if(g_lowThresholdValue == 0)g_lowThresholdValue = 1;
   threshold(srcGrayImage, srcThresholdImage, g_lowThresholdValue, g_upThresholdValue, THRESH_BINARY);
   //二t值¦Ì化¡¥完ª¨º成¨¦,尋¡ã找¨°輪?廓¤a
   findContours(srcThresholdImage, contours, hierarchys, RETR_TREE, CHAIN_APPROX_SIMPLE,Point(0,0));
   //多¨¤邊À?形?畢À?竟1輪?廓¤a,先¨¨生¦¨²成¨¦變À?量¢?
   vector<vector<Point>>contours_polys(contours.size());//多¨¤邊À?形?
   vector<Rect>boundRect(contours.size());//輪?廓¤a最Á?外ªa層?矩?形?邊À?界?
   vector<Point2f>center(contours.size());//最Á?小?面?積y包㨹圍¡ì圓2
   vector<float>radius(contours.size());
   
   DstImage = srcCopyImage.clone();
   for(int i = 0; i < contours.size();i++)
   {
       //逼À?近¨¹多¨¤邊À?形?
       approxPolyDP(Mat(contours[i]), contours_polys[i], 3, true);//逼À?近¨¹精?度¨¨3且¨°封¤a閉À?
       //從䨮逼À?近¨¹到Ì?的Ì?多¨¤邊À?形?得Ì?到Ì?最Á?外ªa層?矩?形?
       boundRect[i] = boundingRect(Mat(contours_polys[i]));
       //從䨮逼À?近¨¹的Ì?多¨¤邊À?形?得Ì?到Ì?最Á?小?圓2形?
       minEnclosingCircle(Mat(contours_polys[i]), center[i], radius[i]);
   }
   //先¨¨繪?制?輪?廓¤a
   drawContours(DstImage, contours, -1, Scalar(g_rng.uniform(0, 255),g_rng.uniform(0, 255),g_rng.uniform(0, 255)));
   //依°¨¤次ä?在¨²輪?廓¤a上¦?繪?制?矩?形?和¨ª圓2形?
   for(int i = 0; i < contours.size(); i++)
   {
       rectangle(DstImage, boundRect[i].tl(), boundRect[i].br(), Scalar(g_rng.uniform(0, 255),g_rng.uniform(0, 255),g_rng.uniform(0, 255)),2,8,0);
       circle(DstImage, center[i], (int)radius[i], Scalar(g_rng.uniform(0, 255),g_rng.uniform(0, 255),g_rng.uniform(0, 255)),2,8,0);
   }
   
   imshow("threshold image", srcThresholdImage);
   imshow("dst image", DstImage);
}

四.圖像的矩

         圖像說到底仍是一個矩陣,而矩陣在不一樣的空間大小的狀況下,進行分析的時候就須要用到矩陣的矩,也就是圖像的矩,矩函數在圖像分析中具備重要的做用,模式識別,目標分類,目標識別方位估計,圖像編碼重構等都須要用到圖像的矩.

         圖像的一階矩與圖像的形狀相關,二階矩顯示圖像中曲線圍繞直線平均值的擴展程度,三階矩是關於平均值的對稱性的測量,由二階矩和三階矩能夠處處七個不變矩,也便是7Hu不變矩.

         不變矩是圖像的統計特徵,知足平移,旋轉,伸縮均不變的不變性,能夠用於圖像識別.

         另外,經過中心矩也能夠計算圖像的輪廓長度以及面積.

         API:Moments moments(源圖像,bool 非0像素是否全看作1);

         注:源能夠是二維數組或者單通道,八位或浮點型

                  第二個參數默認爲false,非零像素不所有看做爲1

         計算輪廓面積

         API:double contourArea(輸入,bool 面向區域標識符)

         輸入輸入爲向量或者是二維點,也能夠說Mat,通常是查找到的圖像的輪廓點集或者是根據輪廓點集擬合出的矩形,橢圓,圓,多邊形,也可使圖像的凸包vector<poit2f>,返回圖像的面積,默認爲false,表示返回圖像的面積是絕對值,不帶符號.

         計算輪廓長度

         API:double arcLength(輸入點集,bool 指示曲線是否封閉)

         注:輸入點集類型與上一個api一致,默認曲線是封閉的.

測試使用圖像的中心矩和opencv提供的算法,來計算圖像輪廓面積,代碼以下

//Image-moments demo.
//Canny gives a binary image, the binary image gives the edges, the edges
//give the moments, and the moments give the mass centres.
//The moments are then used to compute contour area and compare it with
//contourArea()/arcLength().

Mat srcImage,srcGrayImage,srcBlurImage,srcThresholdImage,srcCopyImage;

vector<vector<Point>>contours;
vector<Vec4i> hierarchys;

const int g_lowThresholdMax = 85;
int g_lowThresholdValue;
int g_upThresholdValue;
void onTrackBarLowThreshold(int pos,void* userData);

int main(int argc,char* argv[])
{
   srcImage = imread("F:\\opencv\\OpenCVImage\\mement.jpg");
   
   if(srcImage.channels() == 3)
   {
       cvtColor(srcImage, srcGrayImage, CV_RGB2GRAY);
   }
   else
   {
       srcGrayImage = srcImage.clone();
   }
   
   blur(srcGrayImage,srcBlurImage,Size(3,3));
   
   namedWindow("canny image");
   g_lowThresholdValue = 80;
   g_upThresholdValue = g_lowThresholdValue*3;
   createTrackbar("low threshold value", "canny image", &g_lowThresholdValue, g_lowThresholdMax,onTrackBarLowThreshold,0);
   onTrackBarLowThreshold(g_lowThresholdValue, 0);
 
   imshow("src image", srcImage);
   
   moveWindow("src image", 0, 0);
   moveWindow("canny image", srcBlurImage.cols, 0);
   moveWindow("dst image", srcBlurImage.cols*2, 0);
   
   waitKey(0);
   return 0;
}



// Trackbar callback: computes the moments of every contour, draws the
// contours and their mass centres, and prints the area computed from the
// moments next to the values from contourArea()/arcLength().
void onTrackBarLowThreshold(int pos,void* userData)
{
    Canny(srcBlurImage, srcThresholdImage, g_lowThresholdValue, g_upThresholdValue,3);
    // Extract contours from the binary edge image.
    findContours(srcThresholdImage, contours, hierarchys, RETR_TREE, CHAIN_APPROX_SIMPLE,Point(0,0));
    // Spatial moments of each contour.
    vector<Moments>mu(contours.size());
    for(size_t i = 0; i < contours.size(); i++)
    {
        mu[i] = moments(contours[i],false);
    }
    // Mass centre of each contour: (m10/m00, m01/m00).
    vector<Point2f>mc(contours.size());
    for(size_t i = 0; i < contours.size(); i++)
    {
        // BUG FIX: degenerate contours (e.g. a straight open edge) have
        // m00 == 0; the original code divided by zero and produced NaN
        // centres.  Fall back to (0,0) for those.
        if(mu[i].m00 != 0)
        {
            mc[i] = Point2f(static_cast<float>(mu[i].m10/mu[i].m00),static_cast<float>(mu[i].m01/mu[i].m00));
        }
        else
        {
            mc[i] = Point2f(0.f,0.f);
        }
    }
    // Draw all contours and their centres on a black single-channel canvas.
    //srcCopyImage = srcImage.clone();
    srcCopyImage = Mat(srcImage.rows,srcImage.cols,CV_8UC1,Scalar::all(0));
    drawContours(srcCopyImage, contours, -1, Scalar(255));
    for(size_t i = 0; i < contours.size(); i++)
    {
        circle(srcCopyImage, mc[i], 4, Scalar(255),-1,8,0);
    }
    imshow("canny image", srcThresholdImage);
    imshow("dst image", srcCopyImage);

    // Compare the moment-based area (m00) with the OpenCV helpers.
    // BUG FIX: the original format string was mojibake; replaced with a
    // readable English message.
    for(size_t i = 0; i < contours.size();i++)
    {
        printf("contour %zu: area from moments = %.2f, contourArea = %.2f, arcLength = %.2f\n",
               i, mu[i].m00, contourArea(contours[i],false), arcLength(contours[i],true));
    }
}

五.分水嶺算法

         分水嶺算法的主要意義在於分割圖像,從背景圖像中得到有用信息,好比在一張圖像中,前景和背景的像素差別老是很大,此時須要將前景背景分離開來,就須要分水嶺算法了.

         分水嶺算法是一種基於標記的分割算法,表示的是輸入圖像的極大值點,在把圖像傳遞給函數以前,須要大體勾畫出圖像中須要分割的區域,這些標記的值可使用輪廓查找算法和輪廓繪製算法在圖像中標記.

         最終造成的,是由極值點構成的一個一個的區域,若是圖像中目標是連在一塊兒的,分割起來有困難,可使用該算法將黏着在一塊兒的目標分開

         直接用邊界來進行分水嶺算法的效果不佳,通常來講,先對先後景進行標記,在應用分水嶺算法,每一個對象內部都是相連的,背景裏面的每一個像素都不屬於任何目標,在應用分水嶺算法就會取得較好的效果.

         void watershed(輸入圖像,圖像掩碼);

         注:輸入圖像必須爲八位三通道彩色圖像,掩碼是運算結果,32位單通道圖像,和源圖有同樣的尺寸和類型,爲啥爲32位呢,由於一張圖像徹底可能被分紅不止255個區域,那八位就不夠用了呀.

         具體效果看以下代碼例程

//Watershed segmentation demo (cv::watershed).

//The user scribbles region markers on the source image; the strokes are

//mirrored into a mask.  The mask goes through contour search, and each

//contour is painted into a new marker image with its own label value.

//watershed() is then called, the resulting regions are coloured, and the

//colour map is blended with the source image for display.

//Combining this with morphological dilate/erode should work even better.

Mat srcImage,srcImageCopy;
Mat maskImage,maskImageCopy;
bool draw;
Point2i prevPoint;//position of the previous mouse event ((-1,-1) = no stroke in progress)
RNG g_rng(12345);
void onMouseEvent(int event,int x,int y,int flag,void* userData);
int main(int argc,char* argv[])
{
   srcImage = imread("F:\\opencv\\OpenCVImage\\waterShed.jpg");
   imshow("src image", srcImage);
   prevPoint = Point2i(-1,-1);
   srcImageCopy = srcImage.clone();
   maskImage = Mat(srcImage.rows,srcImage.cols,CV_32SC1,Scalar::all(0));
   maskImageCopy = maskImage.clone();
   
   setMouseCallback("src image", onMouseEvent);
   imshow("dst image", maskImage);
   
   moveWindow("src image", 0, 0);
   moveWindow("dst image", srcImage.cols, 0);
   draw = false;
   
   int keyValue = 0;
   do
   {
       keyValue = waitKey(30);
       if(keyValue == '1')//清?除y圖ª?像?
       {
           draw = false;
           srcImageCopy = srcImage.clone();
           maskImageCopy = maskImage.clone();
           prevPoint = Point2i(-1,-1);
           imshow("src image", srcImageCopy);
           imshow("dst image", maskImageCopy);
       }
       else if(keyValue == '2')
       {
           //開a始º?計?算?
           if(draw == true)//有®D輪?廓¤a
           {
               vector<vector<Point2i>>contours;
               vector<Vec4i>hierarchy;
               findContours(maskImageCopy, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
               //檢¨¬查¨¦輪?廓¤a
               if(contours.size() == 0)continue;
               //找¨°出?輪?廓¤a以°?後¨®將?輪?廓¤a繪?制?在¨²源¡ämask上¦?
               //對?不?同ª?的Ì?輪?廓¤a,用®?不?同ª?的Ì?值¦Ì進?行D區?分¤?
               Mat maskDstImage = Mat(maskImageCopy.size(),CV_32SC1,Scalar::all(0));
               int c_comp = 0;
               for(int i = 0 ; i >= 0;i = hierarchy[i][0],c_comp++)
               {
                   //hierarchy[i][0]對?應®|後¨®一°?個?輪?廓¤a
                   //繪?制?輪?廓¤a,輪?廓¤a的Ì?值¦Ì依°¨¤次ä?爲a1 2 3 4 5
                   drawContours(maskDstImage, contours, i, Scalar::all(c_comp+1),-1,8,hierarchy,INT_MAX);
               }
               //此ä?時º¡À,一°?共2有®Dc_comp個?輪?廓¤a點Ì?,生¦¨²成¨¦scalar數ºy組Á¨¦,用®?於®¨²後¨®期¨²着Á?色¦?
               vector<Vec3b>colorTab;
               for (int i = 0; i < c_comp; i++) {
                   int r = g_rng.uniform(0, 255);
                   int g = g_rng.uniform(0, 255);
                   int b = g_rng.uniform(0, 255);
                   colorTab.push_back(Vec3b((uchar)r,(uchar)g,(uchar)b));
               }
               //進?行D分¤?水?嶺¢?處ä|理¤¨ª算?法¤¡§
               double startTime = (double)getTickCount();
               watershed(srcImageCopy, maskDstImage);
               double endTime = (double)getTickCount();
               printf("算?法¤¡§使º1用®?時º¡À間?爲a%.2f \r\n",((endTime-startTime)*1000)/getTickFrequency());
               //得Ì?到Ì?了¢?分¤?水?嶺¢?圖ª?像?以°?後¨®,按ã¡ä照?colortab的Ì?內¨²容¨Y,對?分¤?水?嶺¢?圖ª?像?進?行D區?域®¨°着Á?色¦?
               //分¤?水?嶺¢?算?法¤¡§的Ì?處ä|理¤¨ª結¨¢果?存ä?放¤?在¨²maskdstImage中D
               Mat dstImage(maskDstImage.size(),CV_8UC3);
               for(int i = 0 ; i< maskDstImage.rows;i++)
               {
                   for (int j = 0; j < maskDstImage.cols; j++) {
                       int index = maskDstImage.at<int>(i,j);
                       if(index == -1)
                       {
                           dstImage.at<Vec3b>(i,j) = Vec3b(255,255,255);
                       }
                       else if(index < 0 || index > c_comp)
                       {
                           dstImage.at<Vec3b>(i,j) = Vec3b(0,0,0);
                       }
                       else
                       {
                           dstImage.at<Vec3b>(i,j) = colorTab[index-1];
                       }
                   }
               }
               //再¨´把ã?源¡ä圖ª?像?和¨ª得Ì?到Ì?的Ì?掩¨²碼?圖ª?像?混¨¬合?顯?示º?新?的Ì?圖ª?片?
               addWeighted(srcImage, 0.5, dstImage, 0.5, 0.0, dstImage);
               //這a裏¤?就¨ª已°?經-得Ì?到Ì?最Á?終?的Ì?maskdst,顯?示º?看¡ä看¡ä
               imshow("dst image", dstImage);
           }
           else
           {
               continue;
           }
       }
       
   }while(keyValue != 27);
   
   return 0;
}

// Mouse callback: while the left button is held, draw the stroke both on
// the visible source copy (white) and into the 32-bit marker mask.
void onMouseEvent(int event,int x,int y,int flag,void* userData)
{
    // Ignore events outside the image area.
    if(x < 0||y< 0||x>=srcImage.cols||y>=srcImage.rows)
        return;
    if(event == EVENT_LBUTTONDOWN)
    {
        prevPoint = Point2i(x,y);
    }
    else if(event == EVENT_LBUTTONUP)
    {
        prevPoint = Point2i(-1,-1);
    }
    else if(event == EVENT_MOUSEMOVE)
    {
        if(flag&EVENT_FLAG_LBUTTON)
        {
            // BUG FIX: if the drag started outside the window (or right
            // after a reset) prevPoint is still (-1,-1) and the original
            // code drew a line from that bogus corner.  Anchor the stroke
            // at the current position instead.
            if(prevPoint.x < 0 || prevPoint.y < 0)
            {
                prevPoint = Point2i(x,y);
            }
            draw = true;
            line(srcImageCopy, prevPoint,Point2i(x,y), Scalar::all(255),4,LINE_AA);
            line(maskImageCopy,prevPoint,Point2i(x,y), Scalar::all(INT_MAX),4,LINE_AA);
            prevPoint = Point2i(x,y);
            imshow("dst image", maskImageCopy);
            imshow("src image", srcImageCopy);
        }
    }
}

六.圖像修復

        圖像修復是指利用那些已經被破壞區域的邊沿,即邊緣的顏色和結構,經過對它的繁殖和混合,填充的被損壞區域中去,已達到圖像修補的目的,注意,邊緣損壞過多的圖像也是難以修復的,對於小塊破損比較有效

          API:void inpaint(原圖片,修復區域掩碼,輸出圖片,double inpaintRadius修復算法參考半徑,int 修復算法標識符)

         注:1.原圖片必須爲八位單通道或者是三通道圖片,掩碼是八位單通道圖片,其中那個非零的那些像素點表示須要修復的區域,輸出圖像必須和源圖像有同樣的尺寸和類型,最小修復半徑修復的每一個店所參考的周圍區域顏色的半徑.

                  2.修復算法標識符表示用什麼修復算法進行計算,取值INPAINT_NS基於nerier_stokes方法 ALEXANDRU_TELEA另外一種方法.

         修復圖片的例子見面的代碼

//Image inpainting demo.
//Needs the source image, a mask image and a destination image.
//Inpaint reference radius is 3; the method used is INPAINT_TELEA.

Mat srcImage,srcImageCopy;
Mat maskImage,maskImageCopy;
bool draw;

Point2i prevPoint;//position of the previous mouse event ((-1,-1) = no stroke in progress)
void onMouseEvent(int event,int x,int y,int flag,void* userData);


// Entry point for the inpainting demo: scribble over the damaged area on
// "src image" (handled by onMouseEvent), press '2' to inpaint, '1' to
// reset, ESC (27) to quit.
int main(int argc,char* argv[])
{
    srcImage = imread("F:\\opencv\\OpenCVImage\\inpaint.jpg");
    // Guard against a missing/unreadable file: every later cv:: call would
    // abort on an empty Mat.
    if (srcImage.empty()) {
        printf("failed to load inpaint.jpg\n");
        return -1;
    }
    imshow("src image", srcImage);
    prevPoint = Point2i(-1,-1);
    srcImageCopy = srcImage.clone();
    // inpaint() wants an 8-bit single-channel mask: non-zero pixels mark
    // the area to repair.
    maskImage = Mat(srcImage.rows,srcImage.cols,CV_8UC1,Scalar::all(0));
    maskImageCopy = maskImage.clone();

    setMouseCallback("src image", onMouseEvent);
    imshow("dst image", maskImage);

    moveWindow("src image", 0, 0);
    moveWindow("dst image", srcImage.cols, 0);
    draw = false;

    int keyValue = 0;
    do {
        keyValue = waitKey(30);
        if(keyValue == '1')
        {
            // Reset: discard the scribbles and the pending stroke point.
            draw = false;
            srcImageCopy = srcImage.clone();
            maskImageCopy = maskImage.clone();
            prevPoint = Point2i(-1,-1);
            imshow("src image", srcImageCopy);
            imshow("dst image", maskImageCopy);
        }
        else if(keyValue == '2')
        {
            // Run the repair only if something was drawn.
            if(draw == true)
            {
                Mat srcImageInpaint = srcImageCopy.clone();
                Mat dstImage = Mat(srcImageInpaint.size(),CV_8UC3,Scalar::all(0));
                // Radius 3, Telea fast-marching inpainting.
                inpaint(srcImageInpaint, maskImageCopy, dstImage, 3, INPAINT_TELEA);
                srcImageCopy = srcImage.clone();
                maskImageCopy = maskImage.clone();
                prevPoint = Point2i(-1,-1);
                imshow("dst image", dstImage);
            }
        }
    } while (keyValue != 27);
    return 0;
}

// Mouse callback: while the left button is held, draw the stroke on the
// visible source copy and mark the same pixels in the 8-bit repair mask.
void onMouseEvent(int event,int x,int y,int flag,void* userData)
{
    // Ignore events outside the image area.
    if(x < 0||y< 0||x>=srcImage.cols||y>=srcImage.rows)
        return;
    if(event == EVENT_LBUTTONDOWN)
    {
        prevPoint = Point2i(x,y);
    }
    else if(event == EVENT_LBUTTONUP)
    {
        prevPoint = Point2i(-1,-1);
    }
    else if(event == EVENT_MOUSEMOVE)
    {
        if(flag&EVENT_FLAG_LBUTTON)
        {
            // BUG FIX: if the drag started outside the window (or right
            // after a reset) prevPoint is still (-1,-1) and the original
            // code drew a line from that bogus corner.  Anchor the stroke
            // at the current position instead.
            if(prevPoint.x < 0 || prevPoint.y < 0)
            {
                prevPoint = Point2i(x,y);
            }
            draw = true;
            line(srcImageCopy, prevPoint,Point2i(x,y), Scalar::all(255),4,LINE_AA);
            // The mask is CV_8UC1; the original wrote Scalar::all(INT_MAX),
            // which saturates to 255 anyway - write 255 explicitly.
            line(maskImageCopy,prevPoint,Point2i(x,y), Scalar::all(255),4,LINE_AA);
            prevPoint = Point2i(x,y);
            imshow("dst image", maskImageCopy);
            imshow("src image", srcImageCopy);
        }
    }
}
相關文章
相關標籤/搜索