cam.py
import argparse
import os
import time

import cv2

from pycoral.adapters.common import input_size
from pycoral.adapters.detect import get_objects
from pycoral.utils.dataset import read_label_file
from pycoral.utils.edgetpu import make_interpreter, run_inference
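# Pipeline overview: decode frames from a TCP camera stream, run Edge TPU
# object detection on each frame, overlay the top-k detections, record the
# annotated video to output1.avi, and show a live preview (press 'q' to quit).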
def main():
    default_model_dir = '../all_models'
    default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
    default_labels = 'coco_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels', help='label file path',
                        default=os.path.join(default_model_dir, default_labels))
    parser.add_argument('--top_k', type=int, default=3,
                        help='number of categories with highest score to display')
    parser.add_argument('--stream_address', default='10.36.36.61:8000',
                        help='IP and port of TCP stream. Format: ip:port')
    parser.add_argument('--threshold', type=float, default=0.2,
                        help='detector score threshold')
    parser.add_argument('--timeout', type=int, default=30,
                        help='time to run in seconds (only used by the '
                             'commented-out timeout check below)')
    args = parser.parse_args()

    print('Loading {} with {} labels.'.format(args.model, args.labels))
    interpreter = make_interpreter(args.model)
    interpreter.allocate_tensors()
    labels = read_label_file(args.labels)
    inference_size = input_size(interpreter)

    # Open the TCP stream and record the annotated output at the stream's
    # native resolution.
    cap = cv2.VideoCapture('tcp://' + args.stream_address)
    frame_size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    writer = cv2.VideoWriter('output1.avi', cv2.VideoWriter_fourcc(*'DIVX'),
                             30, frame_size)
    # start_time = time.time()
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        cv2_im = frame
        # The model expects RGB input at its fixed inference size; OpenCV
        # decodes frames as BGR, so convert and resize before inference.
        cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
        cv2_im_rgb = cv2.resize(cv2_im_rgb, inference_size)
        before_inference = time.time()
        run_inference(interpreter, cv2_im_rgb.tobytes())
        print((time.time() - before_inference) * 1000)  # inference time in ms
        # Keep only the top_k detections above the score threshold.
        objs = get_objects(interpreter, args.threshold)[:args.top_k]
        cv2_im = append_objs_to_img(cv2_im, inference_size, objs, labels)
        writer.write(cv2_im)
        cv2.imshow('frame', cv2_im)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        # if (time.time() - start_time) > args.timeout:
        #     print("Stopping...")
        #     break
    cap.release()
    writer.release()
    cv2.destroyAllWindows()

def append_objs_to_img(cv2_im, inference_size, objs, labels):
    height, width, channels = cv2_im.shape
    # Detections are in inference-tensor coordinates; scale the boxes back up
    # to the original frame size before drawing.
    scale_x, scale_y = width / inference_size[0], height / inference_size[1]
    for obj in objs:
        bbox = obj.bbox.scale(scale_x, scale_y)
        x0, y0 = int(bbox.xmin), int(bbox.ymin)
        x1, y1 = int(bbox.xmax), int(bbox.ymax)
        percent = int(100 * obj.score)
        label = '{}% {}'.format(percent, labels.get(obj.id, obj.id))
        cv2_im = cv2.rectangle(cv2_im, (x0, y0), (x1, y1), (0, 255, 0), 2)
        cv2_im = cv2.putText(cv2_im, label, (x0, y0 + 30),
                             cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0), 2)
    return cv2_im


if __name__ == '__main__':
    main()
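
# Example invocation (illustrative; the address below matches the default
# above, so point --stream_address at your own camera's TCP endpoint):
#
#   python3 cam.py --stream_address 10.36.36.61:8000 --threshold 0.4 --top_k 5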