Sample Code for Real-Time Eye Tracking with Python and OpenCV
Real-time eye tracking can be implemented with Python and OpenCV; no high-end hardware is required, an ordinary webcam is enough.
A video demo of the project is available at: https://www.bilibili.com/video/av75181965/
The main program is as follows:
import sys
import cv2
import numpy as np
import process
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.uic import loadUi
from PyQt5.QtGui import QPixmap, QImage
class Window(QMainWindow):
    def __init__(self):
        super(Window, self).__init__()
        loadUi('GUImain.ui', self)
        with open("style.css", "r") as css:
            self.setStyleSheet(css.read())
        self.face_detector, self.eye_detector, self.detector = process.init_cv()
        self.startButton.clicked.connect(self.start_webcam)
        self.stopButton.clicked.connect(self.stop_webcam)
        self.camera_is_running = False
        self.previous_right_keypoints = None
        self.previous_left_keypoints = None
        self.previous_right_blob_area = None
        self.previous_left_blob_area = None
    def start_webcam(self):
        if not self.camera_is_running:
            self.capture = cv2.VideoCapture(cv2.CAP_DSHOW)  # VideoCapture(0) sometimes drops error #-1072875772
            if not self.capture.isOpened():
                self.capture = cv2.VideoCapture(0)
            self.camera_is_running = True
            self.timer = QTimer(self)
            self.timer.timeout.connect(self.update_frame)
            self.timer.start(2)

    def stop_webcam(self):
        if self.camera_is_running:
            self.capture.release()
            self.timer.stop()
            self.camera_is_running = not self.camera_is_running
    def update_frame(self):  # logic of the main loop
        _, base_image = self.capture.read()
        self.display_image(base_image)
        processed_image = cv2.cvtColor(base_image, cv2.COLOR_BGR2GRAY)  # webcam frames are BGR
        face_frame, face_frame_gray, left_eye_estimated_position, right_eye_estimated_position, _, _ = process.detect_face(
            base_image, processed_image, self.face_detector)
        if face_frame is not None:
            left_eye_frame, right_eye_frame, left_eye_frame_gray, right_eye_frame_gray = process.detect_eyes(
                face_frame, face_frame_gray, left_eye_estimated_position, right_eye_estimated_position,
                self.eye_detector)
            if right_eye_frame is not None:
                if self.rightEyeCheckbox.isChecked():
                    right_eye_threshold = self.rightEyeThreshold.value()
                    right_keypoints, self.previous_right_keypoints, self.previous_right_blob_area = self.get_keypoints(
                        right_eye_frame, right_eye_frame_gray, right_eye_threshold,
                        previous_area=self.previous_right_blob_area,
                        previous_keypoint=self.previous_right_keypoints)
                    process.draw_blobs(right_eye_frame, right_keypoints)
                    right_eye_frame = np.require(right_eye_frame, np.uint8, 'C')
                    self.display_image(right_eye_frame, window='right')
            if left_eye_frame is not None:
                if self.leftEyeCheckbox.isChecked():
                    left_eye_threshold = self.leftEyeThreshold.value()
                    left_keypoints, self.previous_left_keypoints, self.previous_left_blob_area = self.get_keypoints(
                        left_eye_frame, left_eye_frame_gray, left_eye_threshold,
                        previous_area=self.previous_left_blob_area,
                        previous_keypoint=self.previous_left_keypoints)
                    process.draw_blobs(left_eye_frame, left_keypoints)
                    left_eye_frame = np.require(left_eye_frame, np.uint8, 'C')
                    self.display_image(left_eye_frame, window='left')
            if self.pupilsCheckbox.isChecked():  # draws keypoints on pupils on the main window
                self.display_image(base_image)
    def get_keypoints(self, frame, frame_gray, threshold, previous_keypoint, previous_area):
        keypoints = process.process_eye(frame_gray, threshold, self.detector,
                                        prevArea=previous_area)
        if keypoints:
            previous_keypoint = keypoints
            previous_area = keypoints[0].size
        else:
            keypoints = previous_keypoint
        return keypoints, previous_keypoint, previous_area
    def display_image(self, img, window='main'):
        # Makes OpenCV images displayable on PyQt and shows them in the chosen widget
        qformat = QImage.Format_Indexed8
        if len(img.shape) == 3:
            if img.shape[2] == 4:  # RGBA
                qformat = QImage.Format_RGBA8888
            else:  # RGB
                qformat = QImage.Format_RGB888
        out_image = QImage(img, img.shape[1], img.shape[0], img.strides[0], qformat)
        out_image = out_image.rgbSwapped()  # BGR to RGB
        if window == 'main':  # main window
            self.baseImage.setPixmap(QPixmap.fromImage(out_image))
            self.baseImage.setScaledContents(True)
        if window == 'left':  # left eye window
            self.leftEyeBox.setPixmap(QPixmap.fromImage(out_image))
            self.leftEyeBox.setScaledContents(True)
        if window == 'right':  # right eye window
            self.rightEyeBox.setPixmap(QPixmap.fromImage(out_image))
            self.rightEyeBox.setScaledContents(True)
if__name__=="__main__":
app=QApplication(sys.argv)
window=Window()
window.setWindowTitle("GUI")
window.show()
sys.exit(app.exec_())
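The Window class above depends on GUImain.ui and style.css, which are not included in this article. If you just want to verify the OpenCV-to-Qt display path used in display_image(), it can be exercised on its own. The following is a minimal sketch, assuming a webcam at index 0; a bare QLabel stands in for the Designer widgets:

import sys
import cv2
from PyQt5.QtWidgets import QApplication, QLabel
from PyQt5.QtGui import QPixmap, QImage

app = QApplication(sys.argv)
capture = cv2.VideoCapture(0)
ok, frame = capture.read()  # frame is a BGR uint8 array of shape (h, w, 3)
capture.release()
if not ok:
    sys.exit("could not read a frame from the webcam")

# Wrap the NumPy buffer in a QImage, then swap BGR to RGB for display,
# exactly as display_image() does above
image = QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0],
               QImage.Format_RGB888).rgbSwapped()
label = QLabel()
label.setPixmap(QPixmap.fromImage(image))
label.show()
sys.exit(app.exec_())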
The eye detection program (process.py, imported above) is as follows:
import os
import cv2
import numpy as np
def init_cv():
    """Loads all of the cv2 tools: face cascade, eye cascade and the blob detector."""
    face_detector = cv2.CascadeClassifier(
        os.path.join("Classifiers", "haar", "haarcascade_frontalface_default.xml"))
    eye_detector = cv2.CascadeClassifier(os.path.join("Classifiers", "haar", 'haarcascade_eye.xml'))
    detector_params = cv2.SimpleBlobDetector_Params()
    detector_params.filterByArea = True
    detector_params.maxArea = 1500
    detector = cv2.SimpleBlobDetector_create(detector_params)
    return face_detector, eye_detector, detector
def detect_face(img, img_gray, cascade):
    """
    Detects all faces; if multiple are found, works with the biggest one. Returns:
    1. The face frame
    2. A gray version of the face frame
    3. Estimated left eye coordinate range
    4. Estimated right eye coordinate range
    5. X of the face frame
    6. Y of the face frame
    """
    coords = cascade.detectMultiScale(img, 1.3, 5)
    if len(coords) > 1:
        biggest = (0, 0, 0, 0)
        for i in coords:
            if i[3] > biggest[3]:
                biggest = i
        biggest = np.array([biggest], np.int32)
    elif len(coords) == 1:
        biggest = coords
    else:
        return None, None, None, None, None, None
    for (x, y, w, h) in biggest:
        frame = img[y:y + h, x:x + w]
        frame_gray = img_gray[y:y + h, x:x + w]
        lest = (int(w * 0.1), int(w * 0.45))  # left eye is expected in this x range
        rest = (int(w * 0.55), int(w * 0.9))  # right eye is expected in this x range
        X = x
        Y = y
    return frame, frame_gray, lest, rest, X, Y
def detect_eyes(img, img_gray, lest, rest, cascade):
    """
    :param img: image frame
    :param img_gray: gray image frame
    :param lest: left eye estimated position, needed to filter out nostrils and to know which eye was found
    :param rest: right eye estimated position
    :param cascade: Haar cascade
    :return: colored and grayscale versions of eye frames
    """
    leftEye = None
    rightEye = None
    leftEyeG = None
    rightEyeG = None
    coords = cascade.detectMultiScale(img_gray, 1.3, 5)
    if coords is None or len(coords) == 0:
        pass
    else:
        for (x, y, w, h) in coords:
            eyecenter = int(float(x) + (float(w) / float(2)))
            if lest[0] < eyecenter < lest[1]:
                leftEye = img[y:y + h, x:x + w]
                leftEyeG = img_gray[y:y + h, x:x + w]
                leftEye, leftEyeG = cut_eyebrows(leftEye, leftEyeG)
            elif rest[0] < eyecenter < rest[1]:
                rightEye = img[y:y + h, x:x + w]
                rightEyeG = img_gray[y:y + h, x:x + w]
                rightEye, rightEyeG = cut_eyebrows(rightEye, rightEyeG)
            else:
                pass  # nostril or other false positive
    return leftEye, rightEye, leftEyeG, rightEyeG


def cut_eyebrows(img, img_gray):
    """Cuts the top quarter of the eye frame off, since eyebrows confuse the blob detector."""
    height, width = img.shape[:2]
    eyebrow_h = int(height / 4)
    img = img[eyebrow_h:height, 0:width]
    img_gray = img_gray[eyebrow_h:height, 0:width]
    return img, img_gray


def process_eye(img, threshold, detector, prevArea=None):
    """Thresholds the grayscale eye frame and detects the pupil as a dark blob."""
    _, img = cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY)
    img = cv2.erode(img, None, iterations=2)
    img = cv2.dilate(img, None, iterations=4)
    img = cv2.medianBlur(img, 5)
    keypoints = detector.detect(img)
    if keypoints and prevArea and len(keypoints) > 1:
        tmp = 1000
        for keypoint in keypoints:  # filter out odd blobs: keep the one closest to the previous area
            if abs(keypoint.size - prevArea) < tmp:
                ans = keypoint
                tmp = abs(keypoint.size - prevArea)
        keypoints = (ans,)
    return keypoints


def draw_blobs(img, keypoints):
    """Draws the detected blob keypoints onto the eye frame."""
    if keypoints:
        cv2.drawKeypoints(img, keypoints, img, (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
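The GUI is only a thin shell around process.py. To sanity-check the detection pipeline without Qt, the functions can be driven from a plain cv2.imshow loop. Below is a minimal sketch under two assumptions: the Classifiers/haar XML files are in place (the same files also ship with opencv-python and can be loaded from cv2.data.haarcascades instead), and a hard-coded threshold of 42 stands in for the GUI sliders:

import cv2
import process

face_detector, eye_detector, blob_detector = process.init_cv()
capture = cv2.VideoCapture(0)
THRESHOLD = 42  # stands in for the threshold sliders; tune for your lighting

while True:
    ok, frame = capture.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face, face_gray, lest, rest, _, _ = process.detect_face(frame, gray, face_detector)
    if face is not None:
        left, right, left_gray, right_gray = process.detect_eyes(face, face_gray, lest, rest, eye_detector)
        for eye, eye_gray in ((left, left_gray), (right, right_gray)):
            if eye is not None:
                keypoints = process.process_eye(eye_gray, THRESHOLD, blob_detector, prevArea=None)
                process.draw_blobs(eye, keypoints)  # eye is a view into frame, so blobs show up there
    cv2.imshow("eye tracking", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to quit
        break

capture.release()
cv2.destroyAllWindows()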
That is all for this article; I hope it is helpful for your learning.