speed_real.py (forked from Dreamdreams8/Ultra-Fast-Lane-Detection-Test)
# Thanks to KopiSoftware for this contribution: https://github.com/KopiSoftware
import torch
import time
import numpy as np
from model.model import parsingNet
import torchvision.transforms as transforms
import cv2
from matplotlib import pyplot as plt
from PIL import Image
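
# This script benchmarks the inference speed of the lane-detection network in three ways:
#   1. test_practical_without_readtime(): times only the forward pass on live camera frames.
#   2. test_practical(): additionally breaks each loop down into capture, pre-processing, and inference time.
#   3. test_theoretical(): times the forward pass on a fixed tensor of shape (1, 3, 288, 800).
# Each test performs 10 warm-up iterations followed by 100 timed iterations.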
img_transforms = transforms.Compose([
    transforms.Resize((288, 800)),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
def resize(x, y):
    global cap
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, x)   # property id 3
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, y)  # property id 4
def test_practical_without_readtime():
    global cap
    # warm-up: run 10 frames through the network before timing
    for i in range(10):
        _, img = cap.read()
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img2 = Image.fromarray(img)
        x = img_transforms(img2)
        x = x.unsqueeze(0).cuda() + 1
        y = net(x)
    print("practical image input size:", img.shape)
    print("practical tensor input size:", x.shape)
    t_all = []
    for i in range(100):
        _, img = cap.read()
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img2 = Image.fromarray(img)
        x = img_transforms(img2)
        x = x.unsqueeze(0).cuda() + 1
        t1 = time.time()
        y = net(x)
        t2 = time.time()
        t_all.append(t2 - t1)
    print("practical without read time:")
    print('\taverage time:', np.mean(t_all))
    print('\taverage fps:', 1 / np.mean(t_all))
    # print('fastest time:', min(t_all))
    # print('fastest fps:', 1 / min(t_all))
    # print('slowest time:', max(t_all))
    # print('slowest fps:', 1 / max(t_all))
def test_practical():
    global cap
    # warm-up: run 10 frames through the network before timing
    for i in range(10):
        _, img = cap.read()
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img2 = Image.fromarray(img)
        x = img_transforms(img2)
        x = x.unsqueeze(0).cuda() + 1
        y = net(x)
    print("practical image input size:", img.shape)
    print("practical tensor input size:", x.shape)
    t_all = []
    t_capture = []
    t_preprocessing = []
    t_net = []
    for i in range(100):
        t1 = time.time()
        t3 = time.time()
        _, img = cap.read()
        t4 = time.time()
        t5 = time.time()
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img2 = Image.fromarray(img)
        x = img_transforms(img2)
        x = x.unsqueeze(0).cuda() + 1
        t6 = time.time()
        y = net(x)
        t2 = time.time()
        t_all.append(t2 - t1)            # full loop: capture + pre-processing + inference
        t_capture.append(t4 - t3)        # frame capture only
        t_preprocessing.append(t6 - t5)  # colour conversion, resize, normalisation, transfer to GPU
        t_net.append(t2 - t6)            # network forward pass only
    print("practical with read time:")
    print('\taverage time:', np.mean(t_all))
    print('\taverage fps:', 1 / np.mean(t_all))
    print('\tcapture time:', np.mean(t_capture))
    print('\tpre-processing time:', np.mean(t_preprocessing))
    print('\tdetect time:', np.mean(t_net))
    # print('fastest time:', min(t_all))
    # print('fastest fps:', 1 / min(t_all))
    # print('slowest time:', max(t_all))
    # print('slowest fps:', 1 / max(t_all))
def test_theoretical():
    # benchmark on a fixed all-ones tensor matching the network input size
    x = torch.zeros((1, 3, 288, 800)).cuda() + 1
    for i in range(10):  # warm-up
        y = net(x)
    t_all = []
    for i in range(100):
        t1 = time.time()
        y = net(x)
        t2 = time.time()
        t_all.append(t2 - t1)
    print("theoretical")
    print('\taverage time:', np.mean(t_all))
    print('\taverage fps:', 1 / np.mean(t_all))
    # print('fastest time:', min(t_all))
    # print('fastest fps:', 1 / min(t_all))
    # print('slowest time:', max(t_all))
    # print('slowest fps:', 1 / max(t_all))
if __name__ == "__main__":
    ### capture data from a camera or a video file
    # cap = cv2.VideoCapture("video.mp4")  # uncomment to benchmark on a video file instead
    cap = cv2.VideoCapture(0)  # camera input (device 0)
    # resize(480, 640)  # uncomment to change the capture resolution
    # torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True
    # TuSimple-style configuration: 100 griding cells, 56 row anchors, 4 lanes
    net = parsingNet(pretrained=False, backbone='18', cls_dim=(100 + 1, 56, 4), use_aux=False).cuda()
    # CULane-style configuration: 200 griding cells, 18 row anchors, 4 lanes
    # net = parsingNet(pretrained=False, backbone='18', cls_dim=(200 + 1, 18, 4), use_aux=False).cuda()
    net.eval()
    test_practical_without_readtime()
    test_practical()
    cap.release()
    test_theoretical()
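
# Usage sketch (assumptions: the Ultra-Fast-Lane-Detection project layout providing model.model.parsingNet,
# a CUDA-capable GPU, and a camera available at index 0):
#   python speed_real.py
# Each test prints its average latency and FPS. Note that time.time() around the forward pass measures
# launch-to-return time of asynchronous CUDA work; calling torch.cuda.synchronize() before reading the
# clock would give a stricter wall-clock measurement of the forward pass.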