Sample code for real-time eye tracking with Python and OpenCV


Posted in Python on November 11, 2019

This article implements real-time eye tracking with Python and OpenCV. No high-end hardware is needed; an ordinary webcam is enough. The result looks like the screenshot below.

(Screenshot: real-time eye tracking result)

A video demo of the project is available at: https://www.bilibili.com/video/av75181965/
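
Only a standard webcam is required. As a quick sanity check (this snippet is not part of the project, just a hypothetical pre-test), you can confirm that OpenCV can open the camera and grab a frame before wiring everything into the GUI:

import cv2

cap = cv2.VideoCapture(0) # 0 = default camera; the main program below adds cv2.CAP_DSHOW to work around a Windows DirectShow error
if not cap.isOpened():
  raise RuntimeError("Could not open the default webcam")
ok, frame = cap.read() # grab a single frame
print("Frame captured:", ok, "resolution:", frame.shape[:2] if ok else None)
cap.release()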

The main program is as follows:

import sys
import cv2
import numpy as np
import process
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.uic import loadUi
from PyQt5.QtGui import QPixmap, QImage
 
 
class Window(QMainWindow):
  def __init__(self):
    super(Window, self).__init__()
    loadUi('GUImain.ui', self)
    with open("style.css", "r") as css:
      self.setStyleSheet(css.read())
    self.face_detector, self.eye_detector, self.detector = process.init_cv()
    self.startButton.clicked.connect(self.start_webcam)
    self.stopButton.clicked.connect(self.stop_webcam)
    self.camera_is_running = False
    self.previous_right_keypoints = None
    self.previous_left_keypoints = None
    self.previous_right_blob_area = None
    self.previous_left_blob_area = None
 
  def start_webcam(self):
    if not self.camera_is_running:
      self.capture = cv2.VideoCapture(cv2.CAP_DSHOW) # camera 0 with the DirectShow backend; plain VideoCapture(0) sometimes fails with error #-1072875772 on Windows
      if not self.capture.isOpened(): # VideoCapture never returns None, so check isOpened() instead
        self.capture = cv2.VideoCapture(0)
      self.camera_is_running = True
      self.timer = QTimer(self)
      self.timer.timeout.connect(self.update_frame)
      self.timer.start(2)
 
  def stop_webcam(self):
    if self.camera_is_running:
      self.capture.release()
      self.timer.stop()
      self.camera_is_running = False
 
  def update_frame(self): # logic of the main loop
 
    ret, base_image = self.capture.read()
    if not ret: # guard against a missing frame, e.g. right after the camera is released
      return
    self.display_image(base_image)
 
    processed_image = cv2.cvtColor(base_image, cv2.COLOR_BGR2GRAY) # OpenCV captures frames in BGR order
 
    face_frame, face_frame_gray, left_eye_estimated_position, right_eye_estimated_position, _, _ = process.detect_face(
      base_image, processed_image, self.face_detector)
 
    if face_frame is not None:
      left_eye_frame, right_eye_frame, left_eye_frame_gray, right_eye_frame_gray = process.detect_eyes(
        face_frame, face_frame_gray, left_eye_estimated_position, right_eye_estimated_position, self.eye_detector)
 
      if right_eye_frame is not None:
        if self.rightEyeCheckbox.isChecked():
          right_eye_threshold = self.rightEyeThreshold.value()
          right_keypoints, self.previous_right_keypoints, self.previous_right_blob_area = self.get_keypoints(
            right_eye_frame, right_eye_frame_gray, right_eye_threshold,
            previous_area=self.previous_right_blob_area,
            previous_keypoint=self.previous_right_keypoints)
          process.draw_blobs(right_eye_frame, right_keypoints)
 
        right_eye_frame = np.require(right_eye_frame, np.uint8, 'C')
        self.display_image(right_eye_frame, window='right')
 
      if left_eye_frame is not None:
        if self.leftEyeCheckbox.isChecked():
          left_eye_threshold = self.leftEyeThreshold.value()
          left_keypoints, self.previous_left_keypoints, self.previous_left_blob_area = self.get_keypoints(
            left_eye_frame, left_eye_frame_gray, left_eye_threshold,
            previous_area=self.previous_left_blob_area,
            previous_keypoint=self.previous_left_keypoints)
          process.draw_blobs(left_eye_frame, left_keypoints)
 
        left_eye_frame = np.require(left_eye_frame, np.uint8, 'C')
        self.display_image(left_eye_frame, window='left')
 
    if self.pupilsCheckbox.isChecked(): # re-display the frame so the pupil keypoints drawn on the eye views also appear in the main window
      self.display_image(base_image)
 
  def get_keypoints(self, frame, frame_gray, threshold, previous_keypoint, previous_area):
 
    keypoints = process.process_eye(frame_gray, threshold, self.detector,
                    prevArea=previous_area)
    if keypoints:
      previous_keypoint = keypoints
      previous_area = keypoints[0].size
    else:
      keypoints = previous_keypoint
    return keypoints, previous_keypoint, previous_area
 
  def display_image(self, img, window='main'):
    # Converts an OpenCV image (numpy array) into a QImage/QPixmap and shows it in the selected PyQt widget
    qformat = QImage.Format_Indexed8
    if len(img.shape) == 3:
      if img.shape[2] == 4: # RGBA
        qformat = QImage.Format_RGBA8888
      else: # RGB
        qformat = QImage.Format_RGB888
 
    out_image = QImage(img, img.shape[1], img.shape[0], img.strides[0], qformat) # BGR to RGB
    out_image = out_image.rgbSwapped()
    if window == 'main': # main window
      self.baseImage.setPixmap(QPixmap.fromImage(out_image))
      self.baseImage.setScaledContents(True)
    if window == 'left': # left eye window
      self.leftEyeBox.setPixmap(QPixmap.fromImage(out_image))
      self.leftEyeBox.setScaledContents(True)
    if window == 'right': # right eye window
      self.rightEyeBox.setPixmap(QPixmap.fromImage(out_image))
      self.rightEyeBox.setScaledContents(True)
 
 
if __name__ == "__main__":
  app = QApplication(sys.argv)
  window = Window()
  window.setWindowTitle("GUI")
  window.show()
  sys.exit(app.exec_())
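
Besides the Python dependencies (PyQt5, OpenCV, NumPy), the window loads a Qt Designer layout (GUImain.ui), a stylesheet (style.css) and, through process.init_cv(), two Haar cascade XML files. A small optional pre-flight check (not part of the original project) can catch missing assets before the GUI starts:

import os

required_files = [
  "GUImain.ui",
  "style.css",
  os.path.join("Classifiers", "haar", "haarcascade_frontalface_default.xml"),
  os.path.join("Classifiers", "haar", "haarcascade_eye.xml"),
]
missing = [path for path in required_files if not os.path.exists(path)]
if missing:
  print("Missing files:", ", ".join(missing))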

The eye detection program (the process module imported above) is as follows:

import os
import cv2
import numpy as np
 
 
def init_cv():
  """loads all of cv2 tools"""
  face_detector = cv2.CascadeClassifier(
    os.path.join("Classifiers", "haar", "haarcascade_frontalface_default.xml"))
  eye_detector = cv2.CascadeClassifier(os.path.join("Classifiers", "haar", 'haarcascade_eye.xml'))
  detector_params = cv2.SimpleBlobDetector_Params()
  detector_params.filterByArea = True
  detector_params.maxArea = 1500
  detector = cv2.SimpleBlobDetector_create(detector_params)
 
  return face_detector, eye_detector, detector
 
 
def detect_face(img, img_gray, cascade):
  """
  Detects all faces; if multiple are found, works with the biggest one. Returns the following values:
  1. The face frame
  2. A gray version of the face frame
  3. Estimated left eye coordinates range
  4. Estimated right eye coordinates range
  5. X of the face frame
  6. Y of the face frame
  """
  coords = cascade.detectMultiScale(img_gray, 1.3, 5) # run the cascade on the grayscale frame
 
  if len(coords) > 1:
    biggest = (0, 0, 0, 0)
    for i in coords:
      if i[3] > biggest[3]: # keep the tallest (largest) detection
        biggest = i
    biggest = np.array([biggest], np.int32)
  elif len(coords) == 1:
    biggest = coords
  else:
    return None, None, None, None, None, None
  for (x, y, w, h) in biggest:
    frame = img[y:y + h, x:x + w]
    frame_gray = img_gray[y:y + h, x:x + w]
    lest = (int(w * 0.1), int(w * 0.45))
    rest = (int(w * 0.55), int(w * 0.9))
    X = x
    Y = y
 
  return frame, frame_gray, lest, rest, X, Y
 
 
def detect_eyes(img, img_gray, lest, rest, cascade):
  """
  :param img: image frame
  :param img_gray: gray image frame
  :param lest: estimated left eye x-range, used to filter out false detections (e.g. nostrils) and to tell which eye was found
  :param rest: estimated right eye x-range
  :param cascade: Haar cascade
  :return: colored and grayscale versions of eye frames
  """
  leftEye = None
  rightEye = None
  leftEyeG = None
  rightEyeG = None
  coords = cascade.detectMultiScale(img_gray, 1.3, 5)
 
  if coords is None or len(coords) == 0:
    pass
  else:
    for (x, y, w, h) in coords:
      eyecenter = int(x + w / 2) # horizontal center of the detection
      if lest[0] < eyecenter < lest[1]:
        leftEye = img[y:y + h, x:x + w]
        leftEyeG = img_gray[y:y + h, x:x + w]
        leftEye, leftEyeG = cut_eyebrows(leftEye, leftEyeG)
      elif rest[0] < eyecenter < rest[1]:
        rightEye = img[y:y + h, x:x + w]
        rightEyeG = img_gray[y:y + h, x:x + w]
        rightEye, rightEyeG = cut_eyebrows(rightEye, rightEyeG)
      else:
        pass # false detection around the nose/nostril area
  return leftEye, rightEye, leftEyeG, rightEyeG
 
 
def process_eye(img, threshold, detector, prevArea=None):
  """
  :param img: eye frame
  :param threshold: threshold value for threshold function
  :param detector: blob detector
  :param prevArea: area of the previous keypoint(used for filtering)
  :return: keypoints
  """
  _, img = cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY)
  img = cv2.erode(img, None, iterations=2)
  img = cv2.dilate(img, None, iterations=4)
  img = cv2.medianBlur(img, 5)
  keypoints = detector.detect(img)
  if keypoints and prevArea and len(keypoints) > 1:
    # keep the blob whose size is closest to the previous frame's blob
    ans = keypoints[0]
    tmp = abs(keypoints[0].size - prevArea)
    for keypoint in keypoints: # filter out odd blobs
      if abs(keypoint.size - prevArea) < tmp:
        ans = keypoint
        tmp = abs(keypoint.size - prevArea)
    keypoints = [ans] # keep a plain list so keypoints[0] and cv2.drawKeypoints work downstream
 
  return keypoints
 
def cut_eyebrows(img, imgG):
  height, width = img.shape[:2]
  img = img[15:height, 0:width] # cut eyebrows out (15 px)
  imgG = imgG[15:height, 0:width]
 
  return img, imgG
 
 
def draw_blobs(img, keypoints):
  """Draws blobs"""
  cv2.drawKeypoints(img, keypoints, img, (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
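
To make the data flow through this module clearer, here is a minimal headless usage sketch that chains the functions together without the PyQt GUI. The camera index, the fixed threshold of 42, the window name and the 'q' quit key are illustrative assumptions, not values from the original project (the GUI reads the thresholds from its own widgets):

import cv2
import process

face_detector, eye_detector, blob_detector = process.init_cv()
cap = cv2.VideoCapture(0)

while True:
  ok, frame = cap.read()
  if not ok:
    break
  gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
  face, face_gray, lest, rest, _, _ = process.detect_face(frame, gray, face_detector)
  if face is not None:
    left, right, left_g, right_g = process.detect_eyes(face, face_gray, lest, rest, eye_detector)
    for eye, eye_gray in ((left, left_g), (right, right_g)):
      if eye is not None:
        keypoints = process.process_eye(eye_gray, 42, blob_detector) # 42 is an arbitrary example threshold
        if keypoints:
          process.draw_blobs(eye, keypoints)
  # the eye frames are numpy views into `frame`, so the drawn keypoints
  # also show up here (the same effect the GUI's pupils checkbox relies on)
  cv2.imshow("eye tracking sketch", frame)
  if cv2.waitKey(1) & 0xFF == ord('q'):
    break

cap.release()
cv2.destroyAllWindows()

The pipeline is the same one the GUI drives on a timer: threshold the grayscale eye frame, clean it up with erosion, dilation and a median blur, and let the blob detector locate the pupil.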

That concludes this article. I hope it helps with your studies, and I hope you will continue to support 三水点靠木.
