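"""eval_clean.py

Evaluate a trained classifier on clean (non-adversarial) samples.

Example invocation (flags match the argparse options below; the paths shown
are the script's own defaults and may need adjusting for your setup):

    python eval_clean.py --dataset CIFAR10 --arch vgg16 \
        --model_path ./save/CIFAR10-VGG.pth --data_dir ./data/CIFAR10/
"""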
from __future__ import print_function
import torch
import numpy as np
import pandas as pd
import os
import sys
import time
import argparse
import re
from utils import setup_seed
from utils import get_datasets, get_model
from utils import Logger
from utils import AverageMeter, accuracy
# ======== fix data type ========
torch.set_default_tensor_type(torch.FloatTensor)
# ======== options ==============
parser = argparse.ArgumentParser(description='Evaluation on clean samples')
# -------- file param. --------------
parser.add_argument('--data_dir',type=str,default='./data/CIFAR10/',help='data directory')
parser.add_argument('--logs_dir',type=str,default='./logs/',help='logs directory')
parser.add_argument('--dataset',type=str,default='CIFAR10',help='data set name')
parser.add_argument('--model_path',type=str,default='./save/CIFAR10-VGG.pth',help='saved model path')
parser.add_argument('--arch',type=str,default='vgg16',help='model architecture')
parser.add_argument('--seed',type=int,default=0,help='random seeds')
parser.add_argument('--batch_size',type=int,default=256,help='batch size for training (default: 256)')
args = parser.parse_args()
# ======== log writer init. ========
hyperparam=os.path.split(os.path.split(args.model_path)[-2])[-1]
if not os.path.exists(os.path.join(args.logs_dir,args.dataset,args.arch,'eval')):
    os.makedirs(os.path.join(args.logs_dir,args.dataset,args.arch,'eval'))
args.logs_path = os.path.join(args.logs_dir,args.dataset,args.arch,'eval',hyperparam+'-clean.log')
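# route all prints through Logger so console output is also recorded in the
# log file (assuming utils.Logger tees to both the given stream and the file)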
sys.stdout = Logger(filename=args.logs_path,stream=sys.stdout)
# -------- main function
def main():

    # ======== fix random seed ========
    setup_seed(args.seed)

    # ======== get data set =============
    trainloader, testloader = get_datasets(args)
    print('-------- DATA INFORMATION --------')
    print('---- dataset: '+args.dataset)

    # ======== load network ========
    checkpoint = torch.load(args.model_path, map_location=torch.device("cpu"))
    net = get_model(args).cuda()
    net.load_state_dict(checkpoint['state_dict'])
    print('-------- MODEL INFORMATION --------')
    print('---- arch.: '+args.arch)
    print('---- saved path: '+args.model_path)
    print('---- inf. seed.: '+str(args.seed))

    # ======== evaluation on clean ========
    print('Validating...')
    if args.dataset == 'ImageNet':
        acc_te = val(net, testloader)
        print('     test acc. = %.2f.'%(acc_te.avg))
    else:
        acc_tr, acc_te = val(net, trainloader), val(net, testloader)
        print('     train/test acc. = %.3f/%.2f.'%(acc_tr.avg, acc_te.avg))
    print("Finished.")

    return
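# -------- validation: compute top-1 accuracy of `net` over `dataloader`
# -------- (returned as an AverageMeter, so callers read acc.avg)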
def val(net, dataloader):
    net.eval()

    batch_time = AverageMeter()
    acc = AverageMeter()

    end = time.time()
    with torch.no_grad():
        # -------- compute the accs.
        for test in dataloader:
            images, labels = test
            images, labels = images.cuda(), labels.cuda()

            # ------- forward
            logits = net(images).detach().float()
            prec1 = accuracy(logits.data, labels)[0]
            acc.update(prec1.item(), images.size(0))

            # ----
            batch_time.update(time.time()-end)
            end = time.time()

    print('     Validation costs %fs.'%(batch_time.sum))
    return acc

# ======== startpoint
if __name__ == '__main__':
    main()