檢測有沒有物體移動
"""Detect whether anything is moving in the webcam feed.

Each processed frame is compared against the previous frame, which is used
as a rolling background — this keeps slow environmental changes (lighting
drift, etc.) from being flagged as motion.
"""
import cv2
import time

camera = cv2.VideoCapture(0)
# cv2.VideoCapture never returns None, so `camera is None` was dead code;
# isOpened() is the correct way to detect a missing/busy camera.
if not camera.isOpened():
    print('請先鏈接攝像頭')
    exit()

fps = 5            # target frame rate for the processing loop
pre_frame = None   # previous (blurred, grayscale) frame used as the background
play_music = False

while True:
    start = time.time()
    res, cur_frame = camera.read()
    if not res:
        break
    end = time.time()

    # Throttle the loop to roughly `fps` frames per second.
    seconds = end - start
    if seconds < 1.0 / fps:
        time.sleep(1.0 / fps - seconds)

    cv2.imshow('img', cur_frame)
    key = cv2.waitKey(30) & 0xff
    if key == 27:  # ESC quits
        break

    # Normalize the frame: grayscale, fixed size, Gaussian blur to suppress
    # sensor noise before differencing against the background frame.
    gray_img = cv2.cvtColor(cur_frame, cv2.COLOR_BGR2GRAY)
    gray_img = cv2.resize(gray_img, (500, 500))
    gray_img = cv2.GaussianBlur(gray_img, (21, 21), 0)

    if pre_frame is None:
        pre_frame = gray_img
    else:
        img_delta = cv2.absdiff(pre_frame, gray_img)
        thresh = cv2.threshold(img_delta, 25, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        # [-2:] keeps this working on both OpenCV 3 (which returns
        # image, contours, hierarchy) and OpenCV 4 (contours, hierarchy).
        contours, hierarchy = cv2.findContours(
            thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
        for c in contours:
            if cv2.contourArea(c) < 1000:  # sensitivity threshold
                continue
            # print(cv2.contourArea(c))
            print("前一幀和當前幀不同了, 有什麼東西在動!")
            play_music = True
            break
        pre_frame = gray_img

camera.release()
cv2.destroyAllWindows()
加入人臉識別測試
"""Face/eye detection (Haar cascades) combined with frame-differencing
motion detection on a live webcam stream.

Detected face crops are saved under ``./face/`` as numbered JPEGs.
"""
import os
import cv2
import time

save_path = './face/'
# Ensure the output directory exists up front — cv2.imwrite silently
# fails (returns False) when the target directory is missing.
os.makedirs(save_path, exist_ok=True)

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

camera = cv2.VideoCapture(0)  # 0 selects the first camera
if camera.isOpened():
    print('Open')
else:
    print('攝像頭未打開')

# Debug: report the capture resolution.
size = (int(camera.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT)))
print('size:' + repr(size))

fps = 5           # target frame rate for the processing loop
pre_frame = None  # previous (blurred, grayscale) frame used as the background
i = 0             # running index for saved face images

while True:
    start = time.time()
    grabbed, frame_lwpCV = camera.read()
    # Bail out BEFORE touching the frame: on a failed read the frame is
    # None and cv2.cvtColor would raise. (The original converted first.)
    if not grabbed:
        break
    gray_lwpCV = cv2.cvtColor(frame_lwpCV, cv2.COLOR_BGR2GRAY)
    end = time.time()

    # --- face detection ---
    faces = face_cascade.detectMultiScale(gray_lwpCV, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame_lwpCV, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # Search for eyes only in the upper half of the face region —
        # that is where eyes are, so precision is higher.
        roi_gray_lwpCV = gray_lwpCV[y:y + h // 2, x:x + w]
        roi_frame_lwpCV = frame_lwpCV[y:y + h // 2, x:x + w]
        # Save the detected face crop to disk.
        cv2.imwrite(save_path + str(i) + '.jpg', frame_lwpCV[y:y + h, x:x + w])
        i += 1
        eyes = eye_cascade.detectMultiScale(roi_gray_lwpCV, 1.03, 5)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_frame_lwpCV, (ex, ey),
                          (ex + ew, ey + eh), (0, 255, 0), 2)
    cv2.imshow('lwpCVWindow', frame_lwpCV)

    # --- motion detection ---
    # Throttle the loop to roughly `fps` frames per second.
    seconds = end - start
    if seconds < 1.0 / fps:
        time.sleep(1.0 / fps - seconds)
    gray_lwpCV = cv2.resize(gray_lwpCV, (500, 500))
    # Gaussian blur smooths out noise from natural vibration, lighting
    # changes and the camera itself, so it is not detected as motion.
    gray_lwpCV = cv2.GaussianBlur(gray_lwpCV, (21, 21), 0)

    # Diff against the background frame, threshold to black/white, then
    # dilate to normalize holes and imperfections in the motion mask.
    if pre_frame is None:
        pre_frame = gray_lwpCV
    else:
        img_delta = cv2.absdiff(pre_frame, gray_lwpCV)
        thresh = cv2.threshold(img_delta, 25, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        # [-2:] keeps this working on both OpenCV 3 (three return values)
        # and OpenCV 4 (two return values).
        contours, hierarchy = cv2.findContours(
            thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
        for c in contours:
            if cv2.contourArea(c) < 1000:  # sensitivity threshold
                continue
            print("咦,有什麼東西在動")
            break
        pre_frame = gray_lwpCV

    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):  # press 'q' to exit the loop
        break

# When everything done, release the capture
camera.release()
cv2.destroyAllWindows()
用同事做了一下實驗,hahahahhhh
附件代碼
https://files.cnblogs.com/files/botoo/%E6%96%87%E4%BB%B6.rar