-
Notifications
You must be signed in to change notification settings - Fork 1
/
dataset_preprocess.py
66 lines (52 loc) · 2.02 KB
/
dataset_preprocess.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
import argparse
import os
import pickle
from importlib import import_module
from tqdm import tqdm
import numpy as np
from torch.utils.data import DataLoader
from data_process import NSDatasets as Dataset, collate_fn
from utils import to_numpy, to_int16
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
import sys

# Allow group-writable output files (preprocessed pickles are often shared).
os.umask(0)

# Make sibling modules (data_process, utils, the model module) importable
# regardless of the current working directory.
root_path = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, root_path)

parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", default="Lapred_original", type=str)
parser.add_argument("-p", "--path", default="./nuscenes/dataset", type=str)
parser.add_argument("--trial", '-t', default=None, type=int)
args = parser.parse_args()

# Preprocessing is single-GPU; pin to device 0.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

# BUG FIX: the -m/--model flag was parsed but ignored — the module name was
# hardcoded to 'Lapred_original'. Honor the flag (default is unchanged).
model = import_module(args.model)
config, *_ = model.get_model(args)

# Force raw (non-preprocessed) loading so the Dataset builds features from
# scratch, and point it at the requested nuScenes root.
config["preprocess"] = False
config["DATAROOT"] = args.path

# Ensure the output directory for the preprocessed pickles exists.
# NOTE(review): assumes config['preprocess_train'] is a path with a directory
# component — confirm against the model's get_model().
os.makedirs(os.path.dirname(config['preprocess_train']), exist_ok=True)
def data_preprocess(split, num_samples):
    """Preprocess one dataset split and pickle the per-sample feature dicts.

    Iterates the split once through a DataLoader, converts each sample's
    tensors to numpy (compacting 'map_info' to int16 to shrink the pickle),
    and writes the resulting list to preprocess/<split>_lapred_orig.p
    under root_path, indexed by each sample's 'idx'.

    Args:
        split: dataset split name, e.g. 'train', 'train_val', or 'val'.
        num_samples: number of samples in the split; sizes the output list.
    """
    dataset = Dataset(config["DATAROOT"], split, config, train=True)
    train_loader = DataLoader(dataset, batch_size=1, num_workers=16,
                              shuffle=False, collate_fn=collate_fn,
                              pin_memory=True, drop_last=False)
    # BUG FIX: the list size was hardcoded to 32186 (the train-split size),
    # so smaller splits produced pickles padded with trailing None entries.
    # Use the num_samples parameter, which was previously ignored.
    stores = [None] * num_samples
    for i, data in enumerate(tqdm(train_loader)):
        data = dict(data)
        # batch_size=1, but collate_fn may still batch; handle each sample.
        for j in range(len(data["idx"])):
            store = dict()
            for key in ["idx", "feats", "ctrs", "orig", "theta", "rot",
                        "gt_preds", "has_preds", "ins_sam", "map_info"]:
                store[key] = to_numpy(data[key][j])
                if key in ["map_info"]:
                    # Map info is the largest field; int16 halves its size.
                    store[key] = to_int16(store[key])
            # Place by global sample index so the pickle is order-independent.
            stores[store["idx"]] = store
    file_name = 'preprocess/{}_lapred_orig.p'.format(split)
    # FIX: use a context manager so the handle is closed even if dump raises
    # (the original open/close pair leaked on error).
    with open(os.path.join(root_path, file_name), 'wb') as f:
        print(f)
        pickle.dump(stores, f, protocol=pickle.HIGHEST_PROTOCOL)
# Preprocess every split in turn; the counts are the nuScenes split sizes
# this script was written against.
_SPLITS = (
    ('preprocess train dataset', 'train', 32186),
    ('preprocess train val dataset', 'train_val', 8560),
    ('preprocess val dataset', 'val', 9041),
)
for _banner, _split, _count in _SPLITS:
    print(_banner)
    data_preprocess(_split, _count)