01_neural_network_training complete.py
import keras
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
# Load the CIFAR-10 data set (50,000 32x32 RGB training images and 10,000 test images across 10 classes)
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data set to 0-to-1 range
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# Convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
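# For example (illustration only, not in the original script): with 10 classes,
# to_categorical turns the label 3 into the one-hot vector
# [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], which matches the 10-unit softmax output below.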
# Create a model and add layers
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', input_shape=(32, 32, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same', activation="relu"))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation="softmax"))
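# (Assumed addition, not in the original script) Printing a layer-by-layer summary
# is a common sanity check on output shapes and parameter counts at this point.
model.summary()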
# Compile the model
model.compile(
    loss='categorical_crossentropy',
    optimizer="adam",
    metrics=['accuracy']
)
# Train the model
model.fit(
    x_train,
    y_train,
    batch_size=32,  # How many images are fed through the network per gradient update, typically 32-128. Too small and training takes longer; too large and the machine may run out of memory.
    epochs=30,  # How many full passes are made over the training set. More passes give the network more chances to learn but lengthen training; for very large datasets this value is usually kept low.
    validation_data=(x_test, y_test),  # Data used to evaluate the network after each epoch
    shuffle=True  # Randomize the order of the training data each epoch so the network cannot simply memorize it
)
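# The lines below are not part of the original script; they are a minimal sketch
# of a common follow-up: evaluating the trained model on the test set and saving
# the architecture and weights to disk. The file names "model_structure.json" and
# "model_weights.h5" are illustrative assumptions.
test_loss, test_accuracy = model.evaluate(x_test, y_test, verbose=0)
print(f"Test loss: {test_loss:.4f}  Test accuracy: {test_accuracy:.4f}")
# Save the network structure as JSON and the learned weights as HDF5
model_structure = model.to_json()
with open("model_structure.json", "w") as f:
    f.write(model_structure)
model.save_weights("model_weights.h5")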