A Simple Program for Comparing the Similarity of Portrait Photos

The program draws on two pieces of other people's code: one part detects the face in a photo, and the other compares the two cropped face photos. The code below is Python (written for Python 2, using OpenCV, NumPy and matplotlib).

# -*- coding: utf-8 -*-
# feimengjuan
# Several ways to measure image similarity, implemented in Python

import cv2
import numpy as np
from matplotlib import pyplot as plt


# Simplest approach: compare similarity with a single-channel histogram
def classify_gray_hist(image1, image2, size=(256, 256)):
    # Compute the histograms first
    # The parameters of calcHist must be wrapped in square brackets
    # Channel 0 is used directly here (for a BGR image this is the blue channel);
    # you could also split the image and compute one histogram per channel
    # 256 bins over the range [0, 255]
    image1 = cv2.resize(image1, size)
    image2 = cv2.resize(image2, size)
    hist1 = cv2.calcHist([image1], [0], None, [256], [0.0, 255.0])
    hist2 = cv2.calcHist([image2], [0], None, [256], [0.0, 255.0])
    # Plot the two histograms for a visual comparison
    plt.plot(range(256), hist1, 'r')
    plt.plot(range(256), hist2, 'b')
    plt.show()
    # Compute the degree of overlap between the two histograms
    degree = 0
    for i in range(len(hist1)):
        if hist1[i] != hist2[i]:
            degree = degree + (1 - abs(hist1[i] - hist2[i]) / max(hist1[i], hist2[i]))
        else:
            degree = degree + 1
    degree = degree / len(hist1)
    return degree


# Compute the similarity of two single-channel histograms
def calculate(image1, image2):
    hist1 = cv2.calcHist([image1], [0], None, [256], [0.0, 255.0])
    hist2 = cv2.calcHist([image2], [0], None, [256], [0.0, 255.0])
    # Compute the degree of overlap between the two histograms
    degree = 0
    for i in range(len(hist1)):
        if hist1[i] != hist2[i]:
            degree = degree + (1 - abs(hist1[i] - hist2[i]) / max(hist1[i], hist2[i]))
        else:
            degree = degree + 1
    degree = degree / len(hist1)
    return degree


# Compute similarity from the histogram of each channel
def classify_hist_with_split(image1, image2, size=(256, 256)):
    # Resize both images, split them into three channels,
    # and average the per-channel similarity values
    image1 = cv2.resize(image1, size)
    image2 = cv2.resize(image2, size)
    sub_image1 = cv2.split(image1)
    sub_image2 = cv2.split(image2)
    sub_data = 0
    for im1, im2 in zip(sub_image1, sub_image2):
        sub_data += calculate(im1, im2)
    sub_data = sub_data / 3
    return sub_data


# Average hash (aHash) comparison
def classify_aHash(image1, image2):
    image1 = cv2.resize(image1, (8, 8))
    image2 = cv2.resize(image2, (8, 8))
    gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
    hash1 = getHash(gray1)
    hash2 = getHash(gray2)
    return Hamming_distance(hash1, hash2)


# Perceptual hash (pHash) comparison
def classify_pHash(image1, image2):
    image1 = cv2.resize(image1, (32, 32))
    image2 = cv2.resize(image2, (32, 32))
    gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
    # Convert the grayscale image to float and apply the DCT
    dct1 = cv2.dct(np.float32(gray1))
    dct2 = cv2.dct(np.float32(gray2))
    # Keep the top-left 8x8 block, which holds the lowest-frequency components
    # This is equivalent to the mask operation done with OpenCV in C++;
    # in Python the sub-matrix can simply be sliced out of the array
    dct1_roi = dct1[0:8, 0:8]
    dct2_roi = dct2[0:8, 0:8]
    hash1 = getHash(dct1_roi)
    hash2 = getHash(dct2_roi)
    return Hamming_distance(hash1, hash2)


# Take a grayscale (or DCT coefficient) block and return its hash as a list of 0/1 bits
def getHash(image):
    average = np.mean(image)
    hash_list = []
    for i in range(image.shape[0]):
        for j in range(image.shape[1]):
            if image[i, j] > average:
                hash_list.append(1)
            else:
                hash_list.append(0)
    return hash_list


# Compute the Hamming distance between two hashes
def Hamming_distance(hash1, hash2):
    num = 0
    for index in range(len(hash1)):
        if hash1[index] != hash2[index]:
            num += 1
    return num
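
# Note: the two histogram-based methods return a similarity in [0, 1] (higher
# means more similar), while the two hash-based methods return a Hamming
# distance over 64 bits (lower means more similar; a distance of about 5 or
# less is a commonly used rule of thumb for "similar" images).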


if __name__ == '__main__':
    # Load the Haar cascade for frontal-face detection; the explicit load()
    # below overrides the file passed to the constructor
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
    face_cascade.load(r'F:\pycharm\py2_7\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml')
    scaling_factor = 0.5
    # Read the first image, detect faces and crop to the first face found
    img1 = cv2.imread('d://68.jpg')
    # img1 = cv2.resize(img1, (300, 300))
    img1 = cv2.resize(img1, None, fx=scaling_factor * 3, fy=scaling_factor * 3, interpolation=cv2.INTER_AREA)
    gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    face_rects = face_cascade.detectMultiScale(gray, 1.1, 5)
    for (x, y, w, h) in face_rects:
        # cv2.rectangle(img1, (x, y), (x + w, y + h), (0, 255, 0), 3)
        img1 = img1[y:y + h, x:x + w]
        break  # use only the first detected face

    # Read the second image, detect faces and crop to the first face found
    img2 = cv2.imread('d://69.jpg')
    # img2 = cv2.resize(img2, (300, 300))
    img2 = cv2.resize(img2, None, fx=scaling_factor * 3, fy=scaling_factor * 3, interpolation=cv2.INTER_AREA)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    face_rects2 = face_cascade.detectMultiScale(gray2, 1.1, 5)
    for (x, y, w, h) in face_rects2:
        # cv2.rectangle(img2, (x, y), (x + w, y + h), (0, 255, 0), 3)
        img2 = img2[y:y + h, x:x + w]
        break  # use only the first detected face
    # Pick one of the comparison methods below
    # degree = classify_gray_hist(img1, img2)
    degree = classify_hist_with_split(img1, img2)
    # degree = classify_aHash(img1, img2)
    # degree = classify_pHash(img1, img2)
    out = 'Similarity: %f' % (float(degree) * 100)
    print out
    # Show the two cropped faces
    img1 = cv2.resize(img1, (300, 300))
    cv2.imshow('img1', img1)
    img2 = cv2.resize(img2, (300, 300))
    cv2.imshow('img2', img2)

    cv2.waitKey(0)
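
As a sanity check on the hand-rolled overlap metric used above, OpenCV also ships a built-in histogram comparison, cv2.compareHist. The sketch below is not part of the original program; it assumes two already-cropped BGR face images and uses the correlation metric, where 1.0 means identical histograms. Note that the constant is named cv2.HISTCMP_CORREL in OpenCV 3 and later, while OpenCV 2.4 uses cv2.cv.CV_COMP_CORREL instead.

def compare_hist_builtin(image1, image2, size=(256, 256)):
    # Minimal sketch: compare normalized grayscale histograms with
    # OpenCV's built-in correlation metric (1.0 = identical)
    image1 = cv2.resize(image1, size)
    image2 = cv2.resize(image2, size)
    gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
    hist1 = cv2.calcHist([gray1], [0], None, [256], [0.0, 255.0])
    hist2 = cv2.calcHist([gray2], [0], None, [256], [0.0, 255.0])
    cv2.normalize(hist1, hist1, 0, 1, cv2.NORM_MINMAX)
    cv2.normalize(hist2, hist2, 0, 1, cv2.NORM_MINMAX)
    return cv2.compareHist(hist1, hist2, cv2.HISTCMP_CORREL)

For example, compare_hist_builtin(img1, img2) can be called on the two cropped faces from the main block and its result compared against the similarity value printed above.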