This repository has been archived by the owner on May 13, 2021. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 22
/
yawn.py
106 lines (80 loc) · 2.96 KB
/
yawn.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
import cv2
import dlib
import numpy as np
# Path to dlib's pre-trained 68-point facial-landmark model; the .dat file
# must be downloaded separately and placed next to this script.
PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"
predictor = dlib.shape_predictor(PREDICTOR_PATH)
# The Haar-cascade detector below was abandoned in favour of dlib's
# frontal face detector.
#cascade_path='haarcascade_frontalface_default.xml'
#cascade = cv2.CascadeClassifier(cascade_path)
detector = dlib.get_frontal_face_detector()
def get_landmarks(im):
    """Locate exactly one face in *im* and return its 68 facial landmarks.

    Returns the string "error" when zero or more than one face is
    detected; otherwise a 68x2 ``np.matrix`` of (x, y) coordinates.
    """
    faces = detector(im, 1)
    # Exactly one face is required; zero or multiple faces are a failure.
    if len(faces) != 1:
        return "error"
    shape = predictor(im, faces[0])
    return np.matrix([[pt.x, pt.y] for pt in shape.parts()])
def annotate_landmarks(im, landmarks):
    """Return a copy of *im* with every landmark drawn and numbered."""
    canvas = im.copy()
    for n, pt in enumerate(landmarks):
        # Each row of the landmark matrix is a 1x2 (x, y) point.
        xy = (pt[0, 0], pt[0, 1])
        cv2.putText(canvas, str(n), xy,
                    fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
                    fontScale=0.4,
                    color=(0, 0, 255))
        cv2.circle(canvas, xy, 3, color=(0, 255, 255))
    return canvas
def top_lip(landmarks):
    """Return the mean y-coordinate (truncated to int) of the upper lip.

    Uses dlib's 68-point layout: points 50-52 (outer upper lip) and
    61-63 (inner upper lip).

    Args:
        landmarks: 68x2 matrix/array of (x, y) landmark coordinates.

    Returns:
        int: truncated mean of the six upper-lip y-values.
    """
    # Index (i, 1) pulls the y-coordinate directly; the original built
    # intermediate point lists plus an unused `top_lip_all_pts` local.
    idx = list(range(50, 53)) + list(range(61, 64))
    return int(np.mean([landmarks[i, 1] for i in idx]))
def bottom_lip(landmarks):
    """Return the mean y-coordinate (truncated to int) of the lower lip.

    Uses dlib's 68-point layout: points 65-67 (inner lower lip) and
    56-58 (outer lower lip).

    Args:
        landmarks: 68x2 matrix/array of (x, y) landmark coordinates.

    Returns:
        int: truncated mean of the six lower-lip y-values.
    """
    # Index (i, 1) pulls the y-coordinate directly; the original built
    # intermediate point lists plus an unused `bottom_lip_all_pts` local.
    idx = list(range(65, 68)) + list(range(56, 59))
    return int(np.mean([landmarks[i, 1] for i in idx]))
def mouth_open(image):
    """Annotate facial landmarks and measure vertical lip separation.

    Args:
        image: BGR frame (numpy array) that may contain one face.

    Returns:
        (annotated_image, lip_distance). When no single face is found,
        the untouched input image is returned with a distance of 0.
    """
    landmarks = get_landmarks(image)
    # get_landmarks signals failure with the string "error".  Comparing a
    # numpy matrix to a string with == relies on fragile elementwise
    # semantics (FutureWarning in numpy), so check the type explicitly.
    if isinstance(landmarks, str):
        return image, 0
    image_with_landmarks = annotate_landmarks(image, landmarks)
    lip_distance = abs(top_lip(landmarks) - bottom_lip(landmarks))
    return image_with_landmarks, lip_distance
#cv2.imshow('Result', image_with_landmarks)
#cv2.imwrite('image_with_landmarks.jpg',image_with_landmarks)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
# --- Live yawn detection from the default webcam ---------------------------
cap = cv2.VideoCapture(0)
yawns = 0           # total completed yawns
yawn_status = False  # True while the mouth is currently open

while True:
    ret, frame = cap.read()
    # Exit cleanly if the camera stops delivering frames rather than
    # passing None into mouth_open() and crashing inside dlib.
    if not ret:
        break
    image_landmarks, lip_distance = mouth_open(frame)
    prev_yawn_status = yawn_status
    # A lip gap wider than 40 px counts as an open mouth.
    # NOTE(review): threshold is resolution/distance dependent — confirm.
    if lip_distance > 40:
        yawn_status = True
        cv2.putText(frame, "Subject is Yawning", (50,450),
                    cv2.FONT_HERSHEY_COMPLEX, 1,(0,0,255),2)
        output_text = " Yawn Count: " + str(yawns + 1)
        cv2.putText(frame, output_text, (50,50),
                    cv2.FONT_HERSHEY_COMPLEX, 1,(0,255,127),2)
    else:
        yawn_status = False
        # Count a yawn only on the open -> closed transition so one yawn
        # is not counted once per frame.
        if prev_yawn_status == True and yawn_status == False:
            yawns += 1
    cv2.imshow('Live Landmarks', image_landmarks)
    cv2.imshow('Yawn Detection', frame)
    if cv2.waitKey(1) == 13:  # 13 is the Enter key
        break

cap.release()
cv2.destroyAllWindows()