# Jin Hoon Bang
"""
Deep feed-forward network for futures from the CBOE Futures Exchange.
"""
import glob
import sys

import numpy as np
import theano
import theano.tensor as T

from src.algorithms.SGD4FFN import SGD4FFN

theano.config.exception_verbosity = 'high'
theano.config.optimizer = 'fast_compile'
np.set_printoptions(threshold=30)

# Redirect all output to a log file.
log = open('speed_test_ep50.log', 'w')
sys.stdout = log
params = dict(
    dataset=glob.glob('/home/jbang/data/smallHybrid/*'),
    hiddenLayers=[1000, 900, 800, 700],
    n_in=0,        # set after the data is loaded
    n_out=129,     # number of classes
    n_row=500,
    batch_size=20,
    n_epochs=2,
    # with_projection = True,  # applicable only with actOptimization
    # model = "plain"          # actChoice or plain or actOptimization
)
def preprocessData(path):
    """Read every binary file in `path` and assemble feature/label arrays.

    Each file starts with two float64 header values (the row and column
    counts), followed by the matrix itself in row-major order. Column 0 of
    each matrix holds the label; the remaining columns are features.
    """
    files = sorted(path)
    label = np.zeros((params['n_row'], 43))       # one label column per file
    feature = np.zeros((params['n_row'], 10000))  # oversized; trimmed below
    index = 0
    col_index = 0
    for file in files:
        binary = np.fromfile(file, dtype='float64')
        numRow = int(binary[0])  # header: row count
        numCol = int(binary[1])  # header: column count
        print("Num Row", numRow)
        print("Num Col", numCol)
        binary = np.delete(binary, [0, 1])
        binary = binary.reshape((numRow, numCol))
        binary = binary[:params['n_row']]
        print(binary)
        label[:, index] = binary[:, 0]
        feature[:, col_index:col_index + numCol - 1] = binary[:, 1:]
        col_index += numCol - 1
        index += 1
    feature = feature[:, :col_index]  # drop the unused buffer columns
    label = label[:, 0]               # keep labels from the first file only
    print("label", label.shape)
    print("feature", feature.shape)
    label = label.squeeze()
    feature = feature.squeeze()
    print("label", label.shape)
    print("feature", feature.shape)
    label = label.astype('int32')
    feature = feature.astype('float64')
    return feature, label
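# A minimal sketch (not part of the original pipeline) of how to write a
# binary file in the layout preprocessData expects: two float64 header
# values (row count, column count) followed by the matrix itself, with
# labels in column 0. The function name and toy shapes are hypothetical,
# for generating test inputs only.
def write_test_file(filename, n_row=500, n_col=5):
    """Write a random matrix in the header-plus-data format read above."""
    data = np.random.rand(n_row, n_col)
    data[:, 0] = np.random.randint(0, params['n_out'], size=n_row)  # labels
    header = np.array([n_row, n_col], dtype='float64')
    with open(filename, 'wb') as f:
        header.tofile(f)
        data.astype('float64').tofile(f)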
def trainTestSplit(feature, label):
    """Split the data 60/20/20 into train, validation, and test sets."""
    n_train = int(0.6 * params['n_row'])
    n_valid = int(0.2 * params['n_row'])
    x_train = feature[:n_train]
    y_train = label[:n_train]
    x_valid = feature[n_train:n_train + n_valid]
    y_valid = label[n_train:n_train + n_valid]
    x_test = feature[n_train + n_valid:]
    y_test = label[n_train + n_valid:]
    return (x_train, y_train), (x_valid, y_valid), (x_test, y_test)
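# Note: the split above is chronological (first 60% train, next 20%
# validation, last 20% test) rather than shuffled, which preserves the
# time ordering of the futures data. With n_row = 500 that works out to
# a 300/100/100 row split.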
def load_data(dataset):
    """Load the dataset and wrap each split in Theano shared variables.

    train_set, valid_set, test_set format: tuple(input, target).
    input and target are both two-dimensional numpy.ndarrays (matrices);
    each row corresponds to one example.
    """
    feature, label = preprocessData(dataset)
    params['n_in'] = feature.shape[1]
    train_set, valid_set, test_set = trainTestSplit(feature, label)

    def shared_dataset(data_xy, borrow=True):
        """Load the dataset into shared variables.

        Storing the dataset in shared variables allows Theano to copy it
        into GPU memory (when the code is run on a GPU). Since copying data
        to the GPU is slow, copying a minibatch every time one is needed
        (the default behaviour when the data is not in a shared variable)
        would cause a large decrease in performance.
        """
        data_x, data_y = data_xy
        shared_x = theano.shared(np.asarray(data_x, dtype=theano.config.floatX),
                                 borrow=borrow)
        shared_y = theano.shared(np.asarray(data_y, dtype=theano.config.floatX),
                                 borrow=borrow)
        # Data stored on the GPU has to be stored as floats, so the labels
        # are stored as ``floatX`` as well (``shared_y`` does exactly that).
        # During computation, however, the labels are used as indices and
        # must be ints, so instead of returning ``shared_y`` directly we
        # cast it to int. This little hack lets us get around the issue.
        return shared_x, T.cast(shared_y, 'int32')

    test_set_x, test_set_y = shared_dataset(test_set)
    valid_set_x, valid_set_y = shared_dataset(valid_set)
    train_set_x, train_set_y = shared_dataset(train_set)
    return [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
            (test_set_x, test_set_y)]
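# A minimal sketch of how the shared variables returned by load_data are
# typically consumed in Theano: only a minibatch index is passed at call
# time, and ``givens`` slices the shared data on the device. This mirrors
# the standard Theano tutorial pattern; SGD4FFN presumably does something
# similar internally, but its code is not shown here, so the names ``cost``
# and ``updates`` below are placeholders.
#
#     index = T.lscalar('index')
#     x, y = T.matrix('x'), T.ivector('y')
#     batch_size = params['batch_size']
#     train_model = theano.function(
#         inputs=[index],
#         outputs=cost,     # some cost expression built from x and y
#         updates=updates,  # gradient-descent parameter updates
#         givens={
#             x: train_set_x[index * batch_size:(index + 1) * batch_size],
#             y: train_set_y[index * batch_size:(index + 1) * batch_size],
#         },
#     )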
if __name__ == '__main__':
    datasets = load_data(params['dataset'])
    SGD4FFN(datasets, params['hiddenLayers'], params['n_in'], params['n_out'],
            n_epochs=params['n_epochs'])
    log.close()