-
Notifications
You must be signed in to change notification settings - Fork 2
/
face-detection-demo.py
93 lines (75 loc) · 5.38 KB
/
face-detection-demo.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
import cv2
import mediapipe as mp
import numpy as np

# MediaPipe's face-detection solution and its drawing helpers.
detection = mp.solutions.face_detection
drawing = mp.solutions.drawing_utils

# BUGFIX: renamed from `input`, which shadowed the builtin.
capture = cv2.VideoCapture(0)

with detection.FaceDetection(model_selection=0, min_detection_confidence=0.5) as faceDetection:
    while capture.isOpened():
        success, frame = capture.read()
        if not success:
            # BUGFIX: the original read frame.shape before this check and then
            # fell through, so a failed read (frame is None) crashed the loop.
            print("Cannot get frame")
            continue
        imageHeight, imageWidth = frame.shape[0], frame.shape[1]

        # To improve performance, mark the frame as not writeable so MediaPipe
        # can pass it by reference; MediaPipe expects RGB, OpenCV gives BGR.
        frame.flags.writeable = False
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = faceDetection.process(frame)

        # Set the frame as writable again and convert back to normal colour.
        frame.flags.writeable = True
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

        if results.detections:  # if a face was detected
            face = results.detections[0]
            drawing.draw_detection(frame, face)  # draw detections if any

            # Normalised (0-1) coordinates of all six face key points
            # (nose tip, both ear tragions, both eyes, mouth centre).
            points = [detection.get_key_point(face, kp) for kp in detection.FaceKeyPoint]
            xs = np.array([p.x for p in points])
            ys = np.array([p.y for p in points])

            # Overall face position: average of the key-point coordinates.
            faceX = float(xs.mean())
            faceY = float(ys.mean())

            # Spread of the key points around the face centre.
            # BUGFIX: the original summed only 5 of the 6 points (range(5))
            # while dividing by 6, and its Y loop was a copy-paste that reused
            # the stale `x` index and the X coordinates, so the Y value was
            # meaningless. np.std computes the intended population std-dev.
            standardDeviationX = float(xs.std())
            standardDeviationY = float(ys.std())
            print("X: " + str(standardDeviationX) + " Y: " + str(standardDeviationY))

            # Scaled vertical nose-to-mouth distance (rough mouth-openness proxy).
            noseY = detection.get_key_point(face, detection.FaceKeyPoint.NOSE_TIP).y
            mouthY = detection.get_key_point(face, detection.FaceKeyPoint.MOUTH_CENTER).y
            mouthNoseDiff = (mouthY - noseY) * 20
            print("MNDif: " + str(mouthNoseDiff))

            # Draw a crosshair through the face centre.
            cv2.line(frame, (int(faceX * imageWidth), imageHeight),
                     (int(faceX * imageWidth), 0), (255, 0, 0), 2)
            cv2.line(frame, (imageWidth, int(faceY * imageHeight)),
                     (0, int(faceY * imageHeight)), (255, 0, 0), 2)

            # Highlight the third of the frame the face centre falls in.
            # The same truncation constants as the 9-branch original are kept
            # so every rectangle lands on identical pixels. (On screen the
            # horizontal labels are mirrored, because the frame is flipped
            # just before display.)
            if faceX < 0.33333:
                cellX = (0, int(imageWidth * 0.33333))
            elif faceX > 0.66666:
                cellX = (int(imageWidth * 0.66666), imageWidth)
            else:
                cellX = (int(imageWidth * 0.33333), int(imageWidth * 0.66666))
            if faceY < 0.33333:
                cellY = (0, int(imageHeight * 0.33333))
            elif faceY > 0.66666:
                cellY = (int(imageHeight * 0.66666), imageHeight)
            else:
                cellY = (int(imageHeight * 0.33333), int(imageHeight * 0.66666))
            cv2.rectangle(frame, (cellX[0], cellY[0]), (cellX[1], cellY[1]), (0, 255, 0), 2)

        # Flip the frame horizontally so the preview behaves like a mirror.
        cv2.imshow('face detection', cv2.flip(frame, 1))
        if cv2.waitKey(5) & 0xFF == ord('q'):
            break

capture.release()
# BUGFIX: close the preview window on exit (was missing).
cv2.destroyAllWindows()