"""
Author: Wouter Van Gansbeke
Licensed under the CC BY-NC 4.0 license (https://creativecommons.org/licenses/by-nc/4.0/)
"""
import numpy as np
import torch
import matplotlib.pyplot as plt
import os
import glob
from Networks.LSQ_layer import Net
from Networks.utils import define_args
from PIL import Image
import torchvision.transforms.functional as F
from torchvision import transforms
import cv2
def getimage(path_img, i):
    """Load the i-th image in path_img, crop the bottom 640 rows and resize to (256, 512)."""
    resize = 256
    imglists = os.listdir(path_img)
    img_name = imglists[i]
    img_path = os.path.join(path_img, img_name)
    with open(img_path, 'rb') as f:
        image = Image.open(f).convert('RGB')
    w, h = image.size
    image = F.crop(image, h - 640, 0, 640, w)          # keep only the bottom 640 rows
    image = F.resize(image, size=(resize, 2 * resize), interpolation=Image.BILINEAR)
    image = transforms.ToTensor()(image).float()
    return image, img_path
def draw_homography_points(img, x, resize=256, color=(255, 0, 0)):
    """Draw the source, ideal and predicted destination points of the homography on img."""
    y_start1 = (0.3 + x[2]) * (resize - 1)
    y_start = 0.3 * (resize - 1)
    y_stop = resize - 1
    src = np.float32([[0.45 * (2 * resize - 1), y_start], [0.55 * (2 * resize - 1), y_start],
                      [0.1 * (2 * resize - 1), y_stop], [0.9 * (2 * resize - 1), y_stop]])
    dst = np.float32([[(0.45 + x[0]) * (2 * resize - 1), y_start1], [(0.55 + x[1]) * (2 * resize - 1), y_start1],
                      [(0.45 + x[0]) * (2 * resize - 1), y_stop], [(0.55 + x[1]) * (2 * resize - 1), y_stop]])
    dst_ideal = np.float32([[0.45 * (2 * resize - 1), y_start], [0.55 * (2 * resize - 1), y_start],
                            [0.45 * (2 * resize - 1), y_stop], [0.55 * (2 * resize - 1), y_stop]])
    xx = cv2.cvtColor((img * 255).astype(np.uint8), cv2.COLOR_RGB2BGR)
    [cv2.circle(xx, tuple(map(int, idx)), radius=5, thickness=-1, color=(255, 0, 0)) for idx in src]
    [cv2.circle(xx, tuple(map(int, idx)), radius=5, thickness=-1, color=(255, 0, 0)) for idx in dst_ideal]
    [cv2.circle(xx, tuple(map(int, idx)), radius=5, thickness=-1, color=(255, 0, 0)) for idx in dst]
    return xx
def draw_fitted_line(img, params, resize, color=(255, 0, 0)):
    """Draw the lane polynomial x = d*y^3 + a*y^2 + b*y + c (normalized coordinates) on img."""
    params = params.data.cpu().tolist()
    y_stop = 0.7
    y_prime = np.linspace(0, y_stop, 20)
    params = [0] * (4 - len(params)) + params   # pad missing high-order coefficients with zeros
    d, a, b, c = params
    x_pred = d * (y_prime ** 3) + a * (y_prime ** 2) + b * y_prime + c
    x_pred = x_pred * (2 * resize - 1)          # scale x to pixel coordinates
    y_prime = (1 - y_prime) * (resize - 1)      # flip and scale y to pixel coordinates
    lane = [(xcord, ycord) for (xcord, ycord) in zip(x_pred, y_prime)]
    img = cv2.polylines(img, [np.int32(lane)], isClosed=False, color=color, thickness=1)
    return img, lane
def test_projective_transform(input, resize, M):
    # Rescale the homography M from normalized [0, 1] coordinates to the pixel
    # coordinates of the (2*resize, resize) image before warping.
    M_scaledup = np.array([[M[0, 0], M[0, 1] * 2, M[0, 2] * (2 * resize - 1)],
                           [M[1, 0] * 0.5, M[1, 1], M[1, 2] * (resize - 1)],
                           [M[2, 0] / (2 * resize - 1), M[2, 1] / (resize - 1), M[2, 2]]])
    inp = cv2.warpPerspective(np.asarray(input), M_scaledup, (2 * resize, resize))
    return inp, M_scaledup
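# Side note (not in the original script): up to the approximation
# (2*resize-1)/(resize-1) ~= 2 used for the M[0,1] and M[1,0] terms above,
# M_scaledup is the change of basis S @ M @ inv(S) with
# S = diag(2*resize-1, resize-1, 1), i.e. the same homography expressed in
# pixel coordinates instead of normalized [0, 1] coordinates.
# A hypothetical helper illustrating the exact form:
def scale_homography_exact(M, resize):
    S = np.diag([2 * resize - 1, resize - 1, 1.0])
    return S @ M @ np.linalg.inv(S)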
def save_weightmap(M, weightmap_zeros, beta0, beta1, beta2, beta3, images, no_ortho, resize):
    M = M.data.cpu().numpy()[0]
    x = np.zeros(3)
parser = define_args()
args = parser.parse_args()
torch.backends.cudnn.benchmark = args.cudnn

# Build the network and restore the best checkpoint.
model = Net(args)
if not args.no_cuda:
    model = model.cuda()
best_file_name = glob.glob(os.path.join("/dataset/fsy/LaneDetection_End2End-master/Saved1", 'model_best*'))[0]
checkpoint = torch.load(best_file_name)
model.load_state_dict(checkpoint['state_dict'])
model.eval()

# Run a single image through the network.
image, path = getimage('/dataset/fsy/LaneDetection_End2End-master/IMAGES', 0)
input = image.unsqueeze(0)
input_data = input.cuda()
with torch.no_grad():
    beta0, beta1, beta2, beta3, weightmap_zeros, M, output_net, outputs_line, outputs_horizon = model(input_data, args.end_to_end)
save_weightmap(M, weightmap_zeros, beta0, beta1, beta2, beta3, input_data, False, 256)
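# A minimal visualization sketch (not part of the original script): project the
# network input to the predicted top view with test_projective_transform and
# save a side-by-side figure. This assumes M is the batch of 3x3 homographies
# returned by the model and input_data holds the normalized RGB batch; the
# output file name is just an example.
im = input_data[0].permute(1, 2, 0).cpu().numpy()    # CHW tensor -> HWC numpy, values in [0, 1]
M_np = M.data.cpu().numpy()[0]                       # homography of the first (only) batch element
top_view, _ = test_projective_transform(im, 256, M_np)
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1); plt.imshow(im); plt.title('input')
plt.subplot(1, 2, 2); plt.imshow(top_view); plt.title('projected top view')
plt.savefig('projection_check.png')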