Source: http://www.cnblogs.com/easymind223/archive/2012/07/03/2575277.html
Anyone who uses Photoshop regularly knows Unsharp Mask (USM) sharpening, an algorithm that strengthens the edges of an image. The principle is explained here; if you want to use this algorithm, it is strongly recommended reading. This post only gives a brief introduction. USM sharpening takes three steps. First, generate a blurred copy and a high-contrast copy of the original image src, denoted blur and contrast. Second, take the difference between src and blur to get a difference image, denoted diff; this is the "Unsharp Mask" in the figure below. Finally, blend src and contrast in a proportion controlled by diff to obtain the sharpened image. USM has one drawback: after sharpening, the maximum and minimum pixel values can exceed those of the original image, as shown by the red dashed and white solid lines in the figure below.
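Before the full legacy-API implementation below, here is a compact sketch of the same three steps using the modern cv::Mat API. The alpha/beta values used to fake the high-contrast copy, the 0..1 range of amount, and the single grey-level threshold test (instead of the per-channel test used in the code below) are my own simplifications, not the original code.

#include <opencv2/opencv.hpp>

// Sketch of the three USM steps on an 8-bit BGR image.
// Assumed parameters: amount in 0..1, sigma is the Gaussian sigma,
// threshold in 0..255.
cv::Mat UsmSketch(const cv::Mat& src, double amount, double sigma, int threshold)
{
    cv::Mat blur, contrast, diff;
    cv::GaussianBlur(src, blur, cv::Size(0, 0), sigma);   // step 1a: blurred copy
    src.convertTo(contrast, -1, 1.5, -64.0);              // step 1b: rough high-contrast copy (assumed gain/offset)
    cv::absdiff(src, blur, diff);                         // step 2: the "unsharp mask"

    // step 3: blend the original with the high-contrast copy, then keep the
    // blended value only where the mask exceeds the threshold.
    cv::Mat blended, mask, dst = src.clone();
    cv::addWeighted(src, 1.0 - amount, contrast, amount, 0.0, blended);
    cv::cvtColor(diff, mask, cv::COLOR_BGR2GRAY);
    blended.copyTo(dst, mask > threshold);
    return dst;
}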
void MyTreasureBox::UnsharpMask(const IplImage* src, IplImage* dst, float amount, float radius, uchar threshold, int contrast)
{
    if (!src) return;

    int imagewidth  = src->width;
    int imageheight = src->height;
    int channel     = src->nChannels;

    IplImage* blurimage = cvCreateImage(cvSize(imagewidth, imageheight), src->depth, channel);
    IplImage* DiffImage = cvCreateImage(cvSize(imagewidth, imageheight), 8, channel);

    // high-contrast copy of the original
    IplImage* highcontrast = cvCreateImage(cvSize(imagewidth, imageheight), 8, channel);
    AdjustContrast(src, highcontrast, contrast);

    // blurred copy of the original (radius is used as the Gaussian aperture and should be a positive odd integer)
    cvSmooth(src, blurimage, CV_GAUSSIAN, (int)radius);

    // difference between the original and the blurred copy
    for (int y = 0; y < imageheight; y++)
    {
        for (int x = 0; x < imagewidth; x++)
        {
            CvScalar ori  = cvGet2D(src, y, x);
            CvScalar blur = cvGet2D(blurimage, y, x);
            CvScalar val;
            val.val[0] = fabs(ori.val[0] - blur.val[0]);
            val.val[1] = fabs(ori.val[1] - blur.val[1]);
            val.val[2] = fabs(ori.val[2] - blur.val[2]);
            cvSet2D(DiffImage, y, x, val);
        }
    }

    // sharpening
    for (int y = 0; y < imageheight; y++)
    {
        for (int x = 0; x < imagewidth; x++)
        {
            CvScalar hc   = cvGet2D(highcontrast, y, x);
            CvScalar diff = cvGet2D(DiffImage, y, x);
            CvScalar ori  = cvGet2D(src, y, x);
            CvScalar val;
            for (int k = 0; k < channel; k++)
            {
                if (diff.val[k] > threshold)
                {
                    // final = original * (1 - r) + high contrast * r
                    val.val[k] = ori.val[k] * (100 - amount) + hc.val[k] * amount;
                    val.val[k] /= 100;
                }
                else
                {
                    val.val[k] = ori.val[k];
                }
            }
            cvSet2D(dst, y, x, val);
        }
    }

    cvReleaseImage(&blurimage);
    cvReleaseImage(&DiffImage);
    cvReleaseImage(&highcontrast);
}
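A minimal call site might look like the following; the file names and parameter values are placeholders of mine, chosen only to show how amount (a percentage, 0..100), radius (an odd Gaussian aperture), threshold (minimum per-channel difference that gets sharpened) and contrast (the increment handed to AdjustContrast, -255..255) fit together.

IplImage* src = cvLoadImage("input.jpg", CV_LOAD_IMAGE_COLOR);
IplImage* dst = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);

MyTreasureBox box;
// amount = 60%, radius = 5, threshold = 10, contrast increment = 50 (all assumed values)
box.UnsharpMask(src, dst, 60.0f, 5.0f, 10, 50);

cvSaveImage("sharpened.jpg", dst);
cvReleaseImage(&src);
cvReleaseImage(&dst);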
It uses the following function to adjust the contrast of an image:
void MyTreasureBox::AdjustContrast(const IplImage* src, IplImage* dst, int contrast)
{
    if (!src) return;

    int imagewidth  = src->width;
    int imageheight = src->height;
    int channel     = src->nChannels;

    // mean of the original image, used as the threshold
    CvScalar mean = {0, 0, 0, 0};
    for (int y = 0; y < imageheight; y++)
    {
        for (int x = 0; x < imagewidth; x++)
        {
            CvScalar ori = cvGet2D(src, y, x);
            for (int k = 0; k < channel; k++)
            {
                mean.val[k] += ori.val[k];
            }
        }
    }
    for (int k = 0; k < channel; k++)
    {
        mean.val[k] /= imagewidth * imageheight;
    }

    // adjust the contrast
    if (contrast <= -255)
    {
        // An increment of -255 is the lower limit of the contrast range: every RGB
        // component equals the threshold, the image turns uniformly grey, and the
        // histogram collapses to a single line at the threshold value.
        for (int y = 0; y < imageheight; y++)
        {
            for (int x = 0; x < imagewidth; x++)
            {
                cvSet2D(dst, y, x, mean);
            }
        }
    }
    else if (contrast > -255 && contrast <= 0)
    {
        // (1) nRGB = RGB + (RGB - Threshold) * Contrast / 255
        // When the increment is greater than -255 and at most 0, apply formula (1)
        // directly to each component. nRGB is the adjusted R/G/B component, RGB the
        // original component, Threshold the given threshold (the mean here), and
        // Contrast the contrast increment.
        for (int y = 0; y < imageheight; y++)
        {
            for (int x = 0; x < imagewidth; x++)
            {
                CvScalar nRGB;
                CvScalar ori = cvGet2D(src, y, x);
                for (int k = 0; k < channel; k++)
                {
                    nRGB.val[k] = ori.val[k] + (ori.val[k] - mean.val[k]) * contrast / 255;
                }
                cvSet2D(dst, y, x, nRGB);
            }
        }
    }
    else if (contrast > 0 && contrast < 255)
    {
        // When the increment is greater than 0 and less than 255, first remap it with
        // formula (2), then apply formula (1):
        // (2) nContrast = 255 * 255 / (255 - Contrast) - 255
        // nContrast is the remapped increment, Contrast the given one.
        CvScalar nRGB;
        int nContrast = 255 * 255 / (255 - contrast) - 255;
        for (int y = 0; y < imageheight; y++)
        {
            for (int x = 0; x < imagewidth; x++)
            {
                CvScalar ori = cvGet2D(src, y, x);
                for (int k = 0; k < channel; k++)
                {
                    nRGB.val[k] = ori.val[k] + (ori.val[k] - mean.val[k]) * nContrast / 255;
                }
                cvSet2D(dst, y, x, nRGB);
            }
        }
    }
    else
    {
        // An increment of 255 is the upper limit of the contrast range and amounts to
        // thresholding: each component becomes 0 or 255, so the image contains at most
        // eight colours (red, yellow, green, cyan, blue, magenta, black and white) and
        // the histogram has at most eight lines.
        for (int y = 0; y < imageheight; y++)
        {
            for (int x = 0; x < imagewidth; x++)
            {
                CvScalar rgb;
                CvScalar ori = cvGet2D(src, y, x);
                for (int k = 0; k < channel; k++)
                {
                    if (ori.val[k] > mean.val[k])
                    {
                        rgb.val[k] = 255;
                    }
                    else
                    {
                        rgb.val[k] = 0;
                    }
                }
                cvSet2D(dst, y, x, rgb);
            }
        }
    }
}
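The remapping in the positive branch is worth a quick numeric look: formula (2) leaves small increments almost untouched but grows rapidly as the increment approaches 255, which is what pushes the image smoothly toward the binarised limit. A tiny standalone check (my own illustration, not part of the original post):

#include <cstdio>

int main()
{
    // nContrast = 255 * 255 / (255 - Contrast) - 255, the remapping used above
    for (int contrast = 0; contrast < 255; contrast += 64)
    {
        int nContrast = 255 * 255 / (255 - contrast) - 255;
        printf("contrast = %3d  ->  nContrast = %4d\n", contrast, nContrast);
    }
    return 0;
}

For increments of 0, 64, 128 and 192 this prints 0, 85, 257 and 777 respectively.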