forked from chrisorozco1097/brain_segmentation
-
Notifications
You must be signed in to change notification settings - Fork 0
/
kfold.py
98 lines (74 loc) · 2.89 KB
/
kfold.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
import os
import sys
ROOT_DIR = os.path.abspath('.')
sys.path.append(ROOT_DIR)
from datahandler import DataHandler
from model_provider import getModel
from generator import *
from params import *
from callbacks import getCallbacks
from kfold_data_loader import *
from tqdm import tqdm
import os
import skimage.io as io
from keras.models import *
from keras import backend as K
import argparse
import sys
import tensorflow as tf
# GPU selection. CUDA_DEVICE_ORDER must be the keyword "PCI_BUS_ID" for the
# indices in CUDA_VISIBLE_DEVICES to match nvidia-smi's ordering.
# (The original assigned a PCI bus address "00000000:D8:00.0" here, which CUDA
# silently ignores, so the device pinning was a no-op.)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

# List of model names to train; logs/checkpoints are saved under these names.
model_names = ['unet']

for model_type in model_names:
    # Load data files and split them into 10 folds.
    image_files, mask_files = load_data_files('data/kfold_data/')
    skf = getKFolds(image_files, mask_files, n=10)

    # NOTE(review): kfold_indices is built but never used below -- training
    # always uses DataHandler's fixed train/test split, so every "fold" trains
    # on identical data. Kept as-is for backward compatibility; confirm whether
    # these indices were meant to drive per-fold data selection.
    kfold_indices = [{'train': train_index, 'val': val_index}
                     for train_index, val_index in
                     skf.split(image_files, mask_files)]

    # Get data and generators.
    dh = DataHandler()
    tr_images, tr_masks, te_images, te_masks = dh.getData()

    start = 0
    end = len(kfold_indices)

    for i in range(start, end):
        # Experiment name; each run logs into its own subfolder.
        exp_name = 'kfold_%s_dice_DA_K%d' % (model_type, i)

        # Per-experiment hyperparameters.
        params = getParams(exp_name, model_type)

        # Common training settings.
        epochs = params['epochs']
        batch_size = params['batch_size']
        verbose = params['verbose']
        augmentation = False
        # Ceiling integer division: Keras expects an integer step count, and
        # rounding up ensures the final partial batch is not dropped.
        # (The original used float division, e.g. 100/32 -> 3.125.)
        steps_per_epoch = -(-len(tr_images) // batch_size)

        # Augmentation doubles the effective training data, so the batch size
        # and step count are doubled to match.
        if 'unet_bn' in model_type or 'unet_attention_bn' in model_type:
            batch_size *= 2
            augmentation = True
            steps_per_epoch = -(-(2 * len(tr_images)) // batch_size)

        # Build the model for this run.
        model = getModel(model_type)

        # Training/validation generators.
        train_generator = getGenerator(tr_images, tr_masks,
                                       augmentation=augmentation,
                                       batch_size=batch_size)
        val_generator = getGenerator(te_images, te_masks,
                                     augmentation=False,
                                     batch_size=batch_size)

        # Persist the model architecture as JSON.
        model_json = model.to_json()
        with open(params['model_name'], "w") as json_file:
            json_file.write(model_json)

        # Callbacks (EarlyStop and ReduceLR are created but intentionally not
        # passed to fit_generator, matching the original behavior).
        Checkpoint, EarlyStop, ReduceLR, Logger, TenBoard = getCallbacks(params)

        # Train the model.
        history = model.fit_generator(
            train_generator,
            epochs=epochs,
            steps_per_epoch=steps_per_epoch,
            validation_data=val_generator,
            # Same ceiling-division fix as steps_per_epoch above.
            validation_steps=-(-len(te_images) // batch_size),
            verbose=verbose,
            callbacks=[Checkpoint, Logger, TenBoard])