-
Notifications
You must be signed in to change notification settings - Fork 0
/
userconf.yml
91 lines (78 loc) · 3.02 KB
/
userconf.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
---
# User-defined configuration file; things here depend on where your files are etc.
# Where the RDSF is mounted - for reading the data + creating DICOMs
# The create_dicoms.py script creates these
rdsf_dir: "/home/mh19137/zebrafish_rdsf/"
# Where the DICOMs are stored - this is where the training data lives
dicom_dirs:
- "/home/mh19137/zebrafish_jaw_segmentation/dicoms/Training set 1/"
- "/home/mh19137/zebrafish_jaw_segmentation/dicoms/Training set 2/"
- "/home/mh19137/zebrafish_jaw_segmentation/dicoms/Training set 3 (base of jaw)/"
# Must end in .pkl (the model will be pickled)
# The model will be saved in the model/ directory
model_path: "test.pkl"
# Which DICOMs to hold out for validation and testing;
# all the others will be used for training
validation_dicoms:
- "39"  # 7 month wt, labelled by Wahab
- "86"  # 3 year wt, labelled by Felix
# At the moment this can only be 1-length
test_dicoms:
- "131"  # 2.5yr het chst11 chst11 talen
# Optimiser options
optimiser: "Adam"  # Must be one of the torch.optim optimisers
learning_rate: 0.001
# Loss function options
loss: "monai.losses.TverskyLoss"
# Keyword arguments passed to the loss constructor
loss_options:
  include_background: false
  to_onehot_y: true
  alpha: 0.15
  beta: 0.85
  sigmoid: true
# RNG seeds
# Note that this still doesn't guarantee reproducibility,
# since the dataloaders and probably some algorithms/other things have different sources
# of randomness
# But (hopefully) it should mean our model is initialised the same way each time
torch_seed: 0
test_train_seed: 1
# Options for the model that you might want to change
# These are sort of like meta-parameters so they're not in the model_params section but maybe they should be
device: "cuda"
window_size: "192,192,192"  # Comma-separated ZYX. Needs to be large enough to hold the whole jaw
patch_size: "160,160,160"  # Bigger holds more context, smaller is faster and allows for bigger batches
batch_size: 12
epochs: 500
lr_lambda: 0.99999  # Exponential decay factor (multiplicative with each epoch)
num_workers: 6  # Number of workers for the dataloader
# Data-augmentation transforms applied during training;
# keys are torchio transform names, values are their options
transforms:
  torchio.RandomFlip:
    axes: [0, 1, 2]
    flip_probability: 0.5
  torchio.RandomAffine:
    p: 0.25
    degrees: 10
    scales: 0.2
  # Other options might be
  # torchio.RandomBlur(p=0.3),
  # torchio.RandomBiasField(0.4, p=0.5),
  # torchio.RandomNoise(0.1, 0.01, p=0.25),
  # torchio.RandomGamma((-0.3, 0.3), p=0.25),
  # torchio.ZNormalization(),
  # torchio.RescaleIntensity(percentiles=(0.5, 99.5)),
model_params:
  model_name: "monai.networks.nets.AttentionUnet"
  # Things you probably won't need to change - I just kept them here to keep all the params in one place
  spatial_dims: 3
  n_classes: 2  # n bones + background
  in_channels: 1  # Our images are greyscale
  # Things you might want to change
  # At the moment, I can't have more than 6 layers in the model because the receptive field
  # gets halved and ends up being an odd number, and then when we upsample we get a size mismatch
  n_layers: 6
  n_initial_channels: 8
  kernel_size: 3
  stride: 2
  dropout: 0.01