-
Notifications
You must be signed in to change notification settings - Fork 3
/
fdetect.py
129 lines (121 loc) · 4.2 KB
/
fdetect.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
import cv2
import sys
import getcascades
def video_read(height, width, camera_index=-1):
    """Open a webcam capture device and request a frame size.

    Parameters
    ----------
    height, width : int
        Desired capture frame size in pixels. Note the (height, width)
        argument order — callers in this file pass ``video_read(480, 640)``.
    camera_index : int, optional
        Device index passed to ``cv2.VideoCapture``. Defaults to ``-1``
        (OpenCV's "first available camera"), matching the original
        hard-coded behavior.

    Returns
    -------
    cv2.VideoCapture
        The opened capture object; the caller is responsible for
        releasing it.
    """
    cam = cv2.VideoCapture(camera_index)
    # The original used the raw legacy property ids 3 and 4; these named
    # constants have exactly those values and make the intent explicit.
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    return cam
def webcam_face_detect_single(facecascade):
    """Show a live webcam feed, drawing a box around at most one face
    per cascade per frame.

    Parameters
    ----------
    facecascade : sequence of cv2.CascadeClassifier
        Cascades to run on each grayscale frame.

    Runs until the user presses 'q' in the display window, then releases
    the camera and closes all OpenCV windows. Prints "Face detected" for
    each box drawn and "Check camera" when no frame is available.
    """
    video = video_read(480, 640)
    while True:
        ret, frame = video.read()
        if ret:
            # Detection runs on grayscale; the color frame is only for display.
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            for cascade in facecascade:
                faces = cascade.detectMultiScale(gray, 1.1, 5)
                # Draw a rectangle around the first face only ("single"):
                # the break stops after one detection per cascade.
                for (x, y, w, h) in faces:
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
                    print("Face detected")
                    break
        # BUG FIX: the original tested `type(frame) != 'NoneType'`, which
        # compares a type object against a *string* and is therefore always
        # True — a None frame reached cv2.imshow and "Check camera" was
        # unreachable. Compare against None directly.
        if frame is not None:
            # Display the resulting frame
            cv2.imshow('Video', frame)
        else:
            print("Check camera")
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Release video capture
    video.release()
    cv2.destroyAllWindows()
def webcam_face_detect(facecascade):
    """Show a live webcam feed, drawing a box around every face each
    cascade detects in every frame.

    Parameters
    ----------
    facecascade : sequence of cv2.CascadeClassifier
        Cascades to run on each grayscale frame.

    Runs until the user presses 'q' in the display window, then releases
    the camera and closes all OpenCV windows. Prints "Face detected" for
    each box drawn and "Check camera" when no frame is available.
    """
    video = video_read(480, 640)
    while True:
        ret, frame = video.read()
        if ret:
            # Hoisted out of the cascade loop: the original recomputed the
            # same BGR->gray conversion once per cascade, which is pure
            # loop-invariant work.
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            for cascade in facecascade:
                faces = cascade.detectMultiScale(gray, 1.1, 5)
                # Draw a rectangle around the faces
                for (x, y, w, h) in faces:
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
                    print("Face detected")
        # BUG FIX: the original tested `type(frame) != 'NoneType'`, which
        # compares a type object against a *string* and is therefore always
        # True — a None frame reached cv2.imshow and "Check camera" was
        # unreachable. Compare against None directly.
        if frame is not None:
            # Display the resulting frame
            cv2.imshow('Video', frame)
        else:
            print("Check camera")
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Release video capture
    video.release()
    cv2.destroyAllWindows()
def webcam_face_detect_template_matching(facecascade):
    """Track a face on the live webcam feed, alternating cascade
    detection with template matching.

    Per frame the loop is in one of three modes:
      * flag == 0, no prior detection: full-frame cascade detection
        (red box), which seeds the template and two search ROIs.
      * flag == 1: cascade detection restricted to the small ROI around
        the last hit (blue box) — much cheaper than a full-frame pass.
      * flag == 0 after the ROI pass found nothing: fall back to
        cv2.matchTemplate on the slightly larger ROI (green box).

    Parameters
    ----------
    facecascade : sequence of cv2.CascadeClassifier
        Cascades to run for detection.

    Runs until 'q' is pressed, then prints the processed-frame count and
    releases the camera.

    NOTE(review): if the very first frames contain no face, the ROI
    branch can reference `roi_gray` before assignment, and the template
    branch can reference `template`/`roi_gray2` before assignment — the
    code relies on a detection having happened at least once (guarded
    only partially by `frame_number != 0`). Also the `-10`/`-20` ROI
    offsets can go negative near the frame edge; Python slicing then
    yields an unexpected (possibly empty) region. Left as-is here;
    flagged for a future fix.
    """
    video_capture = video_read(480, 640)
    frame_number = 0          # frames on which a box was drawn (any mode)
    flag = 0                  # 1 = a face was located on the previous frame
    # Coordinates of the most recent detection, used to map ROI-relative
    # hits back to full-frame coordinates and to size the template box.
    last_x = 0
    last_y = 0
    last_w = 0
    last_h = 0
    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        if ret:
            if flag == 0:
                # Mode 1: no current lock — scan the whole frame.
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                for i in range(len(facecascade)):
                    faces1 = facecascade[i].detectMultiScale(gray, 1.1, 5)
                    # Draw a rectangle around the faces
                    for (x, y, w, h) in faces1:
                        # Save the face patch as the matching template and
                        # build two search windows: +10 px for the next
                        # cascade pass, +20 px for template matching.
                        template = gray[y:y+h, x:x+w]
                        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
                        roi_gray = gray[(y-10):(y+h+10), (x-10):(x+w+10)]
                        roi_gray2 = gray[(y-20):(y+h+20), (x-20):(x+w+20)]
                        last_x = x
                        last_y = y
                        last_h = h
                        last_w = w
                        flag = 1
                        frame_number = frame_number+1
            else:
                # Mode 2: had a lock last frame — search only the saved ROI.
                # flag is cleared first; it is re-set only if the ROI pass
                # finds a face, otherwise the template branch below runs.
                flag = 0
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                for i in range(len(facecascade)):
                    faces1 = facecascade[i].detectMultiScale(roi_gray, 1.1, 5)
                    # Draw a rectangle around the faces
                    for (x, y, w, h) in faces1:
                        # Translate ROI-relative coords back to full-frame
                        # coords (the ROI started 10 px up-left of last hit).
                        x = last_x+x-10
                        y = last_y+y-10
                        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
                        roi_gray = gray[(y-10):(y+h+10), (x-10):(x+w+10)]
                        roi_gray2 = gray[(y-20):(y+h+20), (x-20):(x+w+20)]
                        template = gray[y:y+h, x:x+w]
                        last_x = x
                        last_y = y
                        last_h = h
                        last_w = w
                        flag = 1
                        frame_number = frame_number+1
            if flag == 0 and frame_number != 0:
                # Mode 3: cascade lost the face this frame — fall back to
                # template matching inside the larger (+20 px) window.
                # Apply template Matching
                res = cv2.matchTemplate(roi_gray2, template, cv2.TM_CCOEFF)
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
                # TM_CCOEFF: the best match is at the maximum response.
                top_left = max_loc
                # Shift from ROI-relative back to full-frame coordinates.
                top_left = (top_left[0]+last_x-10, top_left[1]+last_y-10)
                bottom_right = (top_left[0] + last_w, top_left[1] + last_h)
                cv2.rectangle(frame, top_left, bottom_right, (0, 255, 0), 2)
                # Re-seed the cascade ROI around the matched position so
                # mode 2 can re-acquire next frame. NOTE(review): flag is
                # NOT set here, yet mode 2 (`else`) only runs when flag==1 —
                # presumably the next frame re-enters mode 1; confirm
                # intended.
                roi_gray = gray[top_left[1]-10:top_left[1]+last_h+10, top_left[0]-10:(top_left[0]+last_w+10)]
                frame_number = frame_number + 1
            # Display the resulting frame
            cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    print(frame_number)
    # Release video capture
    video_capture.release()
    cv2.destroyAllWindows()