train_learning.py

'''
code version 1.0 by hjc (from nju to ucas)
'''
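
# Fine-tunes a SphereFace (sphere20a) network with the A-Softmax loss
# (AngleLoss) on a WCDataset, logging loss and accuracy to tensorboardX
# and saving periodic checkpoints.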
import os
import shutil

import tensorboardX
import torch
from torch import nn, optim
from torch.utils.data import DataLoader

from dataset import WCDataset
from net_sphere import AngleLinear, AngleLoss, sphere20a
from utils import Timer, get_config, get_model_list


def get_net(model_path, class_num, first_train):
    """Load a sphere20a backbone and prepare it for fine-tuning."""
    if first_train:
        # Start from pretrained weights and swap in a new AngleLinear
        # head sized for this dataset's class count.
        net = sphere20a()
        net.load_state_dict(torch.load(model_path))
        net.fc6 = AngleLinear(512, class_num)
        net.classnum = class_num
    else:
        # Resuming: the checkpoint already has a class_num-sized head.
        net = sphere20a(classnum=class_num)
        net.load_state_dict(torch.load(model_path))
    # Only train the last conv block and the fully-connected layers.
    for name, param in net.named_parameters():
        param.requires_grad = name.startswith('conv4') or name.startswith('relu4') or name.startswith('fc')  # or name.startswith('stn')
    return net
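
# A minimal usage sketch (the weight path and class count are illustrative,
# not taken from this repo): adapt pretrained sphere20a weights to a new
# dataset and list which parameters remain trainable.
#
#   net = get_net('weights/sphere20a.pth', class_num=500, first_train=True)
#   trainable = [n for n, p in net.named_parameters() if p.requires_grad]
#   # -> only conv4*/relu4*/fc* parameter names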


def get_optimizer(net, hyperparameters):
    # 'initial_lr' is stored in the param group so that a scheduler
    # resumed with last_epoch != -1 can recompute the current lr.
    return optim.SGD(
        [{'params': [param for param in net.parameters() if param.requires_grad],
          'initial_lr': hyperparameters['lr']}],
        lr=hyperparameters['lr'],
        momentum=hyperparameters['momentum'],
        weight_decay=hyperparameters['weight_decay'],
    )


def get_scheduler(optimizer, hyperparameters, last_epoch=-1):
    return optim.lr_scheduler.MultiStepLR(
        optimizer,
        milestones=hyperparameters['milestones'],
        gamma=hyperparameters['gamma'],
        last_epoch=last_epoch,
    )
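
# Note: MultiStepLR multiplies the lr by gamma each time the step count
# crosses a milestone, e.g. milestones=[10, 20] with gamma=0.1 yields
# lr, 0.1*lr, 0.01*lr. In train() below, scheduler.step() runs once per
# log interval rather than once per epoch, so milestones here are counted
# in units of log_iter batches.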


def train(net, dataloader, criterion, optimizer, scheduler, hyperparameters, writer, checkpoint_directory, iteration):
    net.cuda()
    net.train()
    criterion = criterion.cuda()
    if iteration == -1:
        iteration = 0
    while True:
        total_count, accepted_count = 0, 0
        with Timer("Elapsed time in update: %f"):
            for images, labels in dataloader:
                images, labels = images.cuda(), labels.cuda()
                iteration += 1  # one iteration corresponds to one batch
                optimizer.zero_grad()
                outputs = net(images, labels)  # (cos_theta, phi_theta) from the AngleLinear head
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
                total_count += len(images)
                accepted_count += int(torch.sum(labels == torch.argmax(outputs[0], dim=1)))
                if iteration % hyperparameters['log_iter'] == 0:
                    writer.add_scalar('loss', float(loss), iteration)
                    writer.add_scalar('accuracy', accepted_count / total_count, iteration)
                    scheduler.step()  # adjust the lr once every log_iter batches
                    print("loss=%f, lr=%f" % (loss.item(), optimizer.param_groups[0]['lr']))
                    print("Iteration: %08d/%08d" % (iteration, hyperparameters['max_iter']))
                    print("total_count:%d, accepted_count:%d, acc:%f" % (total_count, accepted_count, accepted_count / total_count))
                if iteration % hyperparameters['snapshot_save_iter'] == 0:
                    torch.save(net.state_dict(), os.path.join(checkpoint_directory, '%08d.pth' % iteration))
                if iteration >= hyperparameters['max_iter']:
                    return 0
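
# Assuming net_sphere follows the reference SphereFace implementation,
# AngleLoss is the A-Softmax objective: the target-class logit cos(theta)
# is replaced by a margin-sharpened psi(theta) derived from cos(m*theta),
# which enforces a larger angular separation between classes. That is why
# accuracy above is read from outputs[0] (plain cos_theta) while the loss
# consumes the full (cos_theta, phi_theta) pair.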


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default='config/init.yaml', help='Path to the config file.')
    parser.add_argument('--output_path', type=str, default='.', help='Outputs path.')
    parser.add_argument('--resume', action='store_true', help='Resume from the latest checkpoint.')
    parser.add_argument('--myGpu', default='0', help='GPU number.')
    opts = parser.parse_args()

    os.environ["CUDA_VISIBLE_DEVICES"] = opts.myGpu
    config = get_config(opts.config)
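
    # Keys this script reads from the YAML config (a minimal sketch; the
    # values shown are illustrative, not taken from this repo):
    #
    #   dataset_path: data/train            # WCDataset root
    #   weight_path: weights/sphere20a.pth  # pretrained backbone for first training
    #   batch_size: 64
    #   num_workers: 4
    #   lr: 0.001
    #   momentum: 0.9
    #   weight_decay: 0.0005
    #   milestones: [100, 200]              # in scheduler steps (one per log_iter batches)
    #   gamma: 0.1
    #   log_iter: 100
    #   snapshot_save_iter: 1000
    #   max_iter: 50000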

    # Set up the logger and output folders.
    model_name = '%s_%s' % (os.path.splitext(os.path.basename(opts.config))[0], 'original_dataset')
    train_writer = tensorboardX.SummaryWriter(os.path.join(opts.output_path + "/logs", model_name))
    output_directory = os.path.join(opts.output_path + "/outputs", model_name)
    checkpoint_directory = os.path.join(output_directory, 'checkpoints')
    os.makedirs(checkpoint_directory, exist_ok=True)
    shutil.copy(opts.config, os.path.join(output_directory, 'config.yaml'))  # keep a copy of the config with the outputs

    dataloader = DataLoader(
        WCDataset(config['dataset_path']),
        batch_size=config['batch_size'],
        shuffle=True,
        drop_last=True,
        num_workers=config['num_workers'],
    )

    if opts.resume:
        # Pick the newest checkpoint; the iteration count is encoded in
        # the file name as an 8-digit number, e.g. '00010000.pth'.
        last_model_name = get_model_list(checkpoint_directory)
        iteration = int(last_model_name[:-4][-8:])
        net = get_net(last_model_name, dataloader.dataset.class_num, False)
        optimizer = get_optimizer(net, config)
        scheduler = get_scheduler(optimizer, config, iteration)
        print('Resume from iteration %d' % iteration)
    else:
        iteration = 0
        net = get_net(config['weight_path'], dataloader.dataset.class_num, True)
        optimizer = get_optimizer(net, config)
        scheduler = get_scheduler(optimizer, config)

    criterion = AngleLoss()
    # criterion = nn.CrossEntropyLoss()
    train(net, dataloader, criterion, optimizer, scheduler, config, train_writer, checkpoint_directory, iteration)
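
# Typical invocations (paths are illustrative):
#   python train_learning.py --config config/init.yaml --myGpu 0
#   python train_learning.py --config config/init.yaml --resume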