camera.py
import cv2
from model import FacialExpressionModel
import numpy as np

# Haar cascade for face detection and the pre-trained facial expression model
facec = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
model = FacialExpressionModel("model.json", "model_weights.h5")
font = cv2.FONT_HERSHEY_SIMPLEX


class VideoCamera(object):
    def __init__(self):
        # 0 selects the default webcam
        self.video = cv2.VideoCapture(0)

    def __del__(self):
        self.video.release()

    # returns a JPEG-encoded camera frame with bounding boxes and predicted emotions drawn on it
    def get_frame(self):
        _, fr = self.video.read()
        gray_fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
        faces = facec.detectMultiScale(gray_fr, 1.3, 5)

        for (x, y, w, h) in faces:
            # crop the detected face and resize to the 48x48 input the model expects
            fc = gray_fr[y:y+h, x:x+w]
            roi = cv2.resize(fc, (48, 48))
            pred = model.predict_emotion(roi[np.newaxis, :, :, np.newaxis])

            # draw the predicted label and a bounding box on the original color frame
            cv2.putText(fr, pred, (x, y), font, 1, (255, 255, 0), 2)
            cv2.rectangle(fr, (x, y), (x+w, y+h), (255, 0, 0), 2)

        _, jpeg = cv2.imencode('.jpg', fr)
        return jpeg.tobytes()
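
# Example of how get_frame() is typically consumed: a minimal sketch of a Flask
# MJPEG streaming route. The Flask app, route name, and generator below are
# assumptions for illustration and are not part of this file.
#
#   from flask import Flask, Response
#   from camera import VideoCamera
#
#   app = Flask(__name__)
#
#   def gen(camera):
#       # yield each JPEG frame as one part of a multipart HTTP response
#       while True:
#           frame = camera.get_frame()
#           yield (b'--frame\r\n'
#                  b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
#
#   @app.route('/video_feed')
#   def video_feed():
#       return Response(gen(VideoCamera()),
#                       mimetype='multipart/x-mixed-replace; boundary=frame')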