python + opencv + dlib + pyqt5 face recognition in practice

About

This project is written in Python and uses OpenCV, dlib, and a few other packages to implement face recognition from a single training sample per person. It has a reasonably friendly UI, and recognition speed is acceptable after optimization.

The implementation follows the same idea as that blog post, but fixes several problems it has:

  • Every recognition run recomputes the feature vector of every face sample in the gallery. With a small gallery the cost is hardly noticeable, but with a large one it becomes very inconvenient (in a test with a 400-person gallery, feature extraction alone took over a minute).
  • Face detection with dlib's detector is very slow.
  • There is no friendly UI.

To address these problems, the following solutions are used:

  • Split the recognition pipeline into two stages: 1) extract the gallery face features; 2) detect the test face and compute Euclidean distances. Running the two stages separately speeds up recognition considerably (see the sketch after this list).
  • Switch the face detection method to the face detector that ships with OpenCV.
  • Draw the UI with PyQt5.
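
The speed-up comes mainly from the first item: the gallery descriptors are computed once and cached to disk, so a recognition run only has to process the single test image. A minimal, self-contained sketch of the idea (the file name vectors.npy matches the scripts below; the random vectors are only stand-ins for real dlib descriptors):

import numpy

# Stage 1 (run once): compute a 128-D descriptor per gallery face and cache them.
gallery_descriptors = [numpy.random.rand(128) for _ in range(5)]  # stand-ins for dlib descriptors
numpy.save('vectors.npy', gallery_descriptors)

# Stage 2 (run per query): load the cache and match the test descriptor by Euclidean distance.
descriptors = numpy.load('vectors.npy')
test_feature = descriptors[2] + 0.01 * numpy.random.rand(128)     # stand-in for the test descriptor
distances = [numpy.linalg.norm(v - test_feature) for v in descriptors]
print('best match index:', numpy.argmin(distances))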

Prerequisites

  • python 3.6.5
  • opencv 3.3.1
  • numpy 1.14.3
  • dlib 19.7.0 (see this article for setup instructions)
  • pycharm + pyqt5 (see this article for setup instructions)
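
A quick way to confirm the environment matches the versions above (a sketch, assuming the packages are already installed):

import sys, cv2, dlib, numpy
print(sys.version)        # expect 3.6.x
print(cv2.__version__)    # expect 3.3.1
print(numpy.__version__)  # expect 1.14.3
print(dlib.__version__)   # expect 19.7.0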

Project directory structure

  • E:\
    • candidate_face
      • liuyifei.jpg
      • tangyan.jpg
      • yangmi.jpg
      • yangzi.jpg
      • zhoudongyu.jpg
    • pyproject
      • shape_predictor_68_face_landmarks.dat
      • dlib_face_recognition_resnet_model_v1.dat
      • lbpcascade_frontalface_improved.xml (used for face detection; found in the OpenCV directory opencv-3.3.1-py36h20b85fd_1\Library\etc\lbpcascades)
      • FaceRecognition.py
      • main.py
    • test.jpg

Recognition workflow

First run FaceRecognition.py, which goes through: reading the face gallery, detecting faces, extracting face feature vectors, saving the feature vectors, recognizing a single test face image, and printing the recognition result.
Then run main.py, which goes through: initializing the UI, loading the parameter models, manually selecting a single test face image, detecting the face with OpenCV's detector, extracting its feature vector, computing Euclidean distances, finding the label with the smallest distance, and outputting the recognition result.
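
The one step in main.py that is easy to miss is bridging the two libraries: the (x, y, w, h) box returned by OpenCV's cascade detector has to be wrapped in a dlib.rectangle before dlib's landmark predictor and descriptor model can use it. A stripped-down sketch of just that step, using the model and cascade files from the directory listing above:

import cv2, dlib, numpy

face_cascade = cv2.CascadeClassifier('lbpcascade_frontalface_improved.xml')
feature_point = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
feature_model = dlib.face_recognition_model_v1('dlib_face_recognition_resnet_model_v1.dat')

img = cv2.imread('E:\\test.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
for (x, y, w, h) in face_cascade.detectMultiScale(gray, 1.1, 6):
    rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))  # OpenCV box -> dlib rectangle
    shape = feature_point(img, rect)                               # 68 landmarks inside that box
    descriptor = numpy.array(feature_model.compute_face_descriptor(img, shape))  # 128-D vector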

Recognition results

(screenshots of the recognition results omitted)

Closing remarks

This project still has plenty of bugs, and the accuracy leaves a lot of room for improvement, so feedback and discussion are very welcome. More details about the project can be found here; if it helps you, please leave a gentle star (not too hard, it might hurt (⊙o⊙)…).

And of course, the source code

  • FaceRecognition.py
import os, dlib, numpy, cv2

predictor_path = 'shape_predictor_68_face_landmarks.dat'
face_rc_model_path = 'dlib_face_recognition_resnet_model_v1.dat'
face_folder_path = 'E:\\candidate_face'
test_img_path = 'E:\\test.jpg'


# Read the face gallery and the corresponding labels
def read_data(path):
    try:
        pic_name_list = os.listdir(path)
        pic_list = []
        for i in pic_name_list:
            whole_path = os.path.join(path, i)
            img = cv2.imread(whole_path)
            pic_list.append(img)
    except IOError:
        print('read error')
        return [], []  # return empty lists so the caller's tuple unpacking does not fail
    else:
        print('read successfully')
        return pic_name_list, pic_list

# Face detector
detector = dlib.get_frontal_face_detector()

# Facial landmark predictor
feature_point = dlib.shape_predictor(predictor_path)

# Face recognition (descriptor) model
feature_model = dlib.face_recognition_model_v1(face_rc_model_path)

# List of candidate face feature vectors
descriptors = []

if __name__ == '__main__':
    name_list, pic_list = read_data(face_folder_path)
    num = 1
    for i in pic_list:
        # Face detection
        dets = detector(i, 1)

        for k, d in enumerate(dets):
            # Landmark detection
            shape = feature_point(i, d)

            # Extract the 128-D face descriptor
            face_feature = feature_model.compute_face_descriptor(i, shape)
            v = numpy.array(face_feature)

            descriptors.append(v)
            print('Extracting face features: person %d' % num)
            num += 1
    # Save the list of feature vectors to a file
    numpy.save('vectors.npy', descriptors)

    ''' Recognize a single test face image '''
    test_img = cv2.imread(test_img_path)
    dets = detector(test_img, 1)
    for k, d in enumerate(dets):
        shape = feature_point(test_img, d)
        test_feature = feature_model.compute_face_descriptor(test_img, shape)
        test_feature = numpy.array(test_feature)

    dist = []
    count = 0
    for i in descriptors:
        dist_ = numpy.linalg.norm(i - test_feature)
        print('%s : %f' % (name_list[count], dist_))
        dist.append(dist_)
        count += 1

    min_dist = numpy.argmin(dist)
    result = name_list[min_dist][:-4]
    print(result)
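
Note that main.py pairs the cached vectors with a fresh os.listdir(face_folder_path) call, so the ordering only stays correct if the gallery folder is unchanged between the two runs. A quick way to sanity-check the cache (the expected shape assumes one detected face per gallery image):

import os, numpy
vectors = numpy.load('vectors.npy')
names = os.listdir('E:\\candidate_face')
print(vectors.shape)        # expected: (number of gallery faces, 128)
for name, v in zip(names, vectors):
    print(name, v[:3])      # label and the first few descriptor values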
  • main.py
# -*- coding: utf-8 -*-

import sys, os, numpy, cv2, dlib
from PyQt5 import QtCore, QtGui, QtWidgets

class Ui_Dialog(object):
    def setupUi(self, Dialog):
        Dialog.setObjectName("Dialog")
        Dialog.resize(699, 300)
        self.toolButton = QtWidgets.QToolButton(Dialog)
        self.toolButton.setGeometry(QtCore.QRect(390, 10, 31, 21))
        self.toolButton.setObjectName("toolButton")
        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setGeometry(QtCore.QRect(460, 10, 75, 23))
        self.pushButton.setObjectName("pushButton")
        self.lineEdit = QtWidgets.QLineEdit(Dialog)
        self.lineEdit.setGeometry(QtCore.QRect(130, 10, 251, 20))
        self.lineEdit.setObjectName("lineEdit")
        self.graphicsView = QtWidgets.QGraphicsView(Dialog)
        self.graphicsView.setGeometry(QtCore.QRect(290, 41, 251, 241))
        self.graphicsView.setObjectName("graphicsView")
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(290, 41, 251, 241))
        self.label.setObjectName("label")
        self.label.setScaledContents(True)  # scale the image to fit the label
        self.graphicsView_2 = QtWidgets.QGraphicsView(Dialog)
        self.graphicsView_2.setGeometry(QtCore.QRect(10, 40, 256, 241))
        self.graphicsView_2.setObjectName("graphicsView_2")
        self.label_2 = QtWidgets.QLabel(Dialog)
        self.label_2.setGeometry(QtCore.QRect(13, 41, 251, 241))
        self.label_2.setObjectName("label_2")
        self.label_2.setScaledContents(True)  # scale the image to fit the label
        self.label_3 = QtWidgets.QLabel(Dialog)
        self.label_3.setGeometry(QtCore.QRect(10, 10, 111, 21))
        font = QtGui.QFont()
        font.setFamily("Agency FB")
        font.setPointSize(11)
        self.label_3.setFont(font)
        self.label_3.setObjectName("label_3")
        self.label_4 = QtWidgets.QLabel(Dialog)
        self.label_4.setGeometry(QtCore.QRect(550, 210, 81, 21))
        font = QtGui.QFont()
        font.setFamily("Agency FB")
        font.setPointSize(11)
        self.label_4.setFont(font)
        self.label_4.setObjectName("label_4")
        self.lineEdit_2 = QtWidgets.QLineEdit(Dialog)
        self.lineEdit_2.setGeometry(QtCore.QRect(550, 240, 141, 20))
        font = QtGui.QFont()
        font.setFamily("Agency FB")
        font.setPointSize(11)
        self.lineEdit_2.setFont(font)
        self.lineEdit_2.setObjectName("lineEdit_2")

        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Turtle-speed face recognition demo"))
        self.toolButton.setText(_translate("Dialog", "..."))
        self.pushButton.setText(_translate("Dialog", "Start recognition"))
        self.lineEdit.setText(_translate("Dialog", "E:/"))
        self.label.setText(_translate("Dialog", "Recognition result"))
        self.label_2.setText(_translate("Dialog", "Test face"))
        self.label_3.setText(_translate("Dialog", "Test photo path:"))
        self.label_4.setText(_translate("Dialog", "Recognition result:"))


class Myshow(QtWidgets.QWidget, Ui_Dialog):
    def __init__(self):
        super(Myshow, self).__init__()
        self.setupUi(self)
        self.pushButton.clicked.connect(self.Recognition)
        self.toolButton.clicked.connect(self.ChoosePath)

        self.predictor_path = 'shape_predictor_68_face_landmarks.dat'
        self.face_rc_model_path = 'dlib_face_recognition_resnet_model_v1.dat'
        self.face_folder_path = 'E:\\candidate_face'

        # Labels are re-read here; os.listdir must return the same order as when vectors.npy was generated
        self.name_list = os.listdir(self.face_folder_path)
        self.descriptors = numpy.load('vectors.npy')

        # Face detection with dlib (alternative, left commented out)
        # self.detector = dlib.get_frontal_face_detector()

        # Face detection with OpenCV
        self.face_cascade = cv2.CascadeClassifier('lbpcascade_frontalface_improved.xml')

        self.feature_point = dlib.shape_predictor(self.predictor_path)
        self.feature_model = dlib.face_recognition_model_v1(self.face_rc_model_path)
        self.test_path = 'E:/'

    def ChoosePath(self):
        file_name = QtWidgets.QFileDialog.getOpenFileName(self, "open file dialog", self.test_path, "Images (*.jpg)")
        print(file_name[0])
        self.test_path = file_name[0]
        self.lineEdit.setText(self.test_path)
        self.label_2.setPixmap(QtGui.QPixmap(self.test_path))  # show the test face image

        # Clear stale content from the previous run
        self.label.clear()
        self.lineEdit_2.clear()

    def Recognition(self):
        test_img = cv2.imread(self.test_path)

        # Face detection with dlib (alternative, kept for reference)
        # dets = self.detector(test_img, 1)
        # for k, d in enumerate(dets):
        #     shape = self.feature_point(test_img, d)
        #     test_feature = self.feature_model.compute_face_descriptor(test_img, shape)
        #     test_feature = numpy.array(test_feature)

        # Face detection with OpenCV
        gray = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)
        dets = self.face_cascade.detectMultiScale(gray, 1.1, 6)
        mark = 0
        for (x, y, w, h) in dets:
            mark = 1
            d = dlib.rectangle(numpy.long(x),numpy.long(y),numpy.long(x+w),numpy.long(y+h))
            shape = self.feature_point(test_img, d)
            test_feature = self.feature_model.compute_face_descriptor(test_img, shape)
            test_feature = numpy.array(test_feature)

        if mark == 1:
            dist = []
            count = 0
            for i in self.descriptors:
                dist_ = numpy.linalg.norm(i - test_feature)
                print('%s : %f' % (self.name_list[count], dist_))
                dist.append(dist_)
                count += 1

            min_dist = numpy.argmin(dist)
            print('%s' % self.name_list[min_dist][:-4])

            show_img_path = os.path.join(self.face_folder_path, self.name_list[min_dist])
            self.label.setPixmap(QtGui.QPixmap(show_img_path))  # show the matched gallery face
            self.lineEdit_2.setText(self.name_list[min_dist][:-4])
        else:
            self.lineEdit_2.setText('no face found')

if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    w = Myshow()
    w.show()
    sys.exit(app.exec_())