data.py (forked from ialhashim/DenseDepth)
import numpy as np
from utils import DepthNorm
from io import BytesIO
from PIL import Image
from zipfile import ZipFile
from keras.utils import Sequence
from augment import BasicPolicy


def extract_zip(input_zip):
    # Read every entry of the archive into memory, keyed by its path inside the zip.
    input_zip = ZipFile(input_zip)
    return {name: input_zip.read(name) for name in input_zip.namelist()}


def resize(img, resolution=480, padding=6):
    from skimage.transform import resize
    return resize(img, (resolution, int(resolution * 4 / 3)), preserve_range=True, mode='reflect', anti_aliasing=True)
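
# Note (added): resize() keeps a 4:3 aspect ratio, so resize(img, 480) gives a
# 480x640 output and resize(img, 240) gives 240x320, matching shape_rgb and
# shape_depth in get_data() below.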


def get_data(batch_size, data_zipfile):
    data = extract_zip(data_zipfile)

    train = list((row.split(',') for row in (data['data/train.csv']).decode("utf-8").split('\n') if len(row) > 0))
    test = list((row.split(',') for row in (data['data/test.csv']).decode("utf-8").split('\n') if len(row) > 0))

    shape_rgb = (batch_size, 480, 640, 3)
    shape_depth = (batch_size, 240, 320, 1)

    # Helpful for testing: flip to True to run on a tiny subset.
    if False:
        train = train[:10]
        test = test[:10]

    return data, train, test, shape_rgb, shape_depth
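
# Data layout sketch (added; the entry names below are illustrative, not taken
# from the upstream repo): get_data() expects the zip to contain
# 'data/train.csv' and 'data/test.csv', each row pairing an RGB entry with its
# depth entry inside the same archive, e.g.
#
#   data/train/scene_0001/1.jpg,data/train/scene_0001/1.png
#
# sample[0] then indexes the RGB image and sample[1] the depth map in the
# Sequence classes below.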


def get_train_test_data(batch_size, data_zipfile, max_depth):
    data, train, test, shape_rgb, shape_depth = get_data(batch_size, data_zipfile)

    train_generator = BasicAugmentRGBSequence(data, train, batch_size=batch_size, shape_rgb=shape_rgb, shape_depth=shape_depth, max_depth=max_depth)
    test_generator = BasicRGBSequence(data, test, batch_size=batch_size, shape_rgb=shape_rgb, shape_depth=shape_depth, max_depth=max_depth)

    return train_generator, test_generator


class BasicAugmentRGBSequence(Sequence):
    """Training sequence: loads RGB/depth pairs from the in-memory zip and applies augmentation."""

    def __init__(self, data, dataset, batch_size, shape_rgb, shape_depth, is_flip=False, is_addnoise=False, is_erase=False, max_depth=1000.0):
        self.data = data
        self.dataset = dataset
        self.policy = BasicPolicy(color_change_ratio=0.50, mirror_ratio=0.50, flip_ratio=0.0 if not is_flip else 0.2,
                                  add_noise_peak=0 if not is_addnoise else 20, erase_ratio=-1.0 if not is_erase else 0.5)
        self.batch_size = batch_size
        self.shape_rgb = shape_rgb
        self.shape_depth = shape_depth
        self.maxDepth = max_depth

        from sklearn.utils import shuffle
        self.dataset = shuffle(self.dataset, random_state=0)

        self.N = len(self.dataset)

    def __len__(self):
        return int(np.ceil(self.N / float(self.batch_size)))

    def __getitem__(self, idx, is_apply_policy=True):
        batch_x, batch_y = np.zeros(self.shape_rgb), np.zeros(self.shape_depth)

        # Build one batch of augmented RGB images and normalized depth targets.
        for i in range(batch_x.shape[0]):
            index = min((idx * self.batch_size) + i, self.N - 1)

            sample = self.dataset[index]

            # RGB scaled to [0, 1]; depth scaled to [0, maxDepth] and then normalized via DepthNorm (from utils).
            x = np.clip(np.asarray(Image.open(BytesIO(self.data[sample[0]]))).reshape(480, 640, 3) / 255, 0, 1)
            y = np.clip(np.asarray(Image.open(BytesIO(self.data[sample[1]]))).reshape(480, 640, 1) / 255 * self.maxDepth, 0, self.maxDepth)
            y = DepthNorm(y, maxDepth=self.maxDepth)

            batch_x[i] = resize(x, 480)
            batch_y[i] = resize(y, 240)

            if is_apply_policy:
                batch_x[i], batch_y[i] = self.policy(batch_x[i], batch_y[i])

            # DEBUG:
            # self.policy.debug_img(batch_x[i], np.clip(DepthNorm(batch_y[i])/maxDepth,0,1), idx, i)
            # exit()

        return batch_x, batch_y


class BasicRGBSequence(Sequence):
    """Evaluation sequence: same loading pipeline as above, but without augmentation."""

    def __init__(self, data, dataset, batch_size, shape_rgb, shape_depth, max_depth=1000.0):
        self.data = data
        self.dataset = dataset
        self.batch_size = batch_size
        self.N = len(self.dataset)
        self.shape_rgb = shape_rgb
        self.shape_depth = shape_depth
        self.maxDepth = max_depth

    def __len__(self):
        return int(np.ceil(self.N / float(self.batch_size)))

    def __getitem__(self, idx):
        batch_x, batch_y = np.zeros(self.shape_rgb), np.zeros(self.shape_depth)
        for i in range(self.batch_size):
            index = min((idx * self.batch_size) + i, self.N - 1)

            sample = self.dataset[index]

            x = np.clip(np.asarray(Image.open(BytesIO(self.data[sample[0]]))).reshape(480, 640, 3) / 255, 0, 1)
            # Stored test depth values are divided by 10 to bring them into the same depth range used at training time.
            y = np.asarray(Image.open(BytesIO(self.data[sample[1]])), dtype=np.float32).reshape(480, 640, 1).copy().astype(float) / 10.0
            y = DepthNorm(y, maxDepth=self.maxDepth)

            batch_x[i] = resize(x, 480)
            batch_y[i] = resize(y, 240)

            # DEBUG:
            # self.policy.debug_img(batch_x[i], np.clip(DepthNorm(batch_y[i])/maxDepth,0,1), idx, i)
            # exit()

        return batch_x, batch_y
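

# ---------------------------------------------------------------------------
# Usage sketch (added; not from the upstream repo). A minimal, hypothetical
# example of driving the generators above: the zip path is illustrative, and
# the model/fit call is only indicated in comments.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    train_generator, test_generator = get_train_test_data(batch_size=4, data_zipfile='nyu_data.zip', max_depth=1000.0)

    # Pull one batch to sanity-check shapes: RGB (4, 480, 640, 3), depth (4, 240, 320, 1).
    batch_x, batch_y = train_generator[0]
    print(batch_x.shape, batch_y.shape)

    # With a compiled DenseDepth-style Keras model, training would look roughly like:
    # model.fit_generator(train_generator, validation_data=test_generator, epochs=20, shuffle=True)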