recognize_cli.py
import glob
import os
import time

import click
import cv2
import numpy as np

import apis


@click.group()
def recognize():
    pass
@click.command()
@click.argument('video-location')
@click.option('--saved-feature-loc', default='data/trained_features', help='folder containing the trained feature files')
@click.option('--save-result-loc', default='data/result_videos', help='folder where the labeled result video is saved')
@click.option('--most-frames', default=None, type=int, help='maximum number of frames to process; omit to process the whole video')
def recognize_faces_in_video(video_location, saved_feature_loc, save_result_loc, most_frames):
    """
    Load all saved features from the feature folder, then detect the faces in
    each frame of the video and compare them against those features. Recognized
    faces are labeled with the matching username, unrecognized ones with
    'unknown', and the labeled video is written to the result folder.
    """
    def get_username(full_path):
        """Extract the username from a feature file path, e.g. 'alice' from 'alice.npy'."""
        basename = os.path.basename(full_path)
        return basename.split('.')[0]

    feature_files = glob.glob(os.path.join(saved_feature_loc, '*.npy'))
    features = [np.load(feature_file) for feature_file in feature_files]
    usernames = [get_username(feature_file) for feature_file in feature_files]

    # begin processing the video
    cap = cv2.VideoCapture(video_location)
    if not cap.isOpened():
        click.echo('cannot open this video', err=True)
        return

    # read the video properties needed to create a matching writer
    video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    fps = cap.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')

    # create the video writer for the labeled output
    output_movie = cv2.VideoWriter(
        os.path.join(save_result_loc, os.path.basename(video_location)),
        fourcc, fps, (int(width), int(height)))

    frame_number = 0
    if most_frames is None:
        most_frames = video_length
    else:
        most_frames = min(most_frames, video_length)

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            # no more frames to read
            break
        frame_number += 1
        # OpenCV reads frames as BGR; the recognition API expects RGB
        rgb_frame = frame[:, :, ::-1]
        face_info_tuples = apis.recognize_faces_in_images(rgb_frame, features)
        # draw a rectangle and name label for every detected face
        for top, right, bottom, left, match_index in face_info_tuples:
            username = 'unknown' if match_index is None else usernames[match_index]
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, username, (left + 6, bottom - 6), font, 0.5, (255, 255, 255), 1)
        output_movie.write(frame)
        click.echo('finished processing frame {}'.format(frame_number))
        if frame_number >= most_frames:
            click.echo('finished processing')
            break

    cap.release()
    output_movie.release()
@click.command()
@click.argument('image-folder')
@click.option('--saved-feature-loc', default='data/trained_features', help='folder containing the trained feature files')
@click.option('--save-result-folder', default='data/result_images', help='folder where the labeled result images are saved')
def recognize_faces_in_images(image_folder, saved_feature_loc, save_result_folder):
    """
    Recognize and label faces in every image in the folder, writing the
    labeled images to the result folder.
    """
    def get_username(full_path):
        """Extract the username from a feature file path, e.g. 'alice' from 'alice.npy'."""
        basename = os.path.basename(full_path)
        return basename.split('.')[0]

    feature_files = glob.glob(os.path.join(saved_feature_loc, '*.npy'))
    features = [np.load(feature_file) for feature_file in feature_files]
    usernames = [get_username(feature_file) for feature_file in feature_files]

    file_formats = ('*.png', '*.jpg', '*.jpeg')
    img_files = []
    for file_format in file_formats:
        img_files.extend(glob.glob(os.path.join(image_folder, file_format)))
    for img_file in img_files:
        recognize_faces_in_image(
            img_file, features, usernames,
            os.path.join(save_result_folder, os.path.basename(img_file)))
def recognize_faces_in_image(image_location, features, usernames, result_location):
    # read the image (BGR) and make an RGB view for the recognition API,
    # drawing on the original BGR image as the video path does
    img = cv2.imread(image_location)
    rgb_img = img[:, :, ::-1]
    start = time.time()
    face_info_tuples = apis.recognize_faces_in_images(rgb_img, features)
    # draw a rectangle and name label for every detected face
    for top, right, bottom, left, match_index in face_info_tuples:
        username = 'unknown' if match_index is None else usernames[match_index]
        cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255), 2)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(img, username, (left + 6, bottom - 6), font, 0.5, (255, 255, 255), 1)
    end = time.time()
    print('{} processed in {:.3f} seconds'.format(os.path.basename(image_location), end - start))
    cv2.imwrite(result_location, img)
recognize.add_command(recognize_faces_in_video)
recognize.add_command(recognize_faces_in_images)

if __name__ == "__main__":
    recognize()
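
# A minimal usage sketch (hypothetical paths). It assumes the feature files,
# e.g. data/trained_features/alice.npy, were produced by a separate training
# step, and that your click version exposes the commands under dashed names
# (older click releases keep the underscored function names):
#
#   python recognize_cli.py recognize-faces-in-video data/videos/demo.mp4 --most-frames 100
#   python recognize_cli.py recognize-faces-in-images data/test_images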