-
Notifications
You must be signed in to change notification settings - Fork 4
/
train_uap.py
241 lines (209 loc) · 11.7 KB
/
train_uap.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
from __future__ import division
import os, sys, time, random, copy
import numpy as np
import torch
import argparse
import torch.backends.cudnn as cudnn
import torch.nn as nn
from collections import OrderedDict
from networks.uap import UAP
from utils.data import get_data_specs, get_data
from utils.utils import get_model_path, get_result_path
from utils.utils import print_log
from utils.network import get_network, set_parameter_requires_grad
from utils.network import get_num_parameters, get_num_non_trainable_parameters, get_num_trainable_parameters
from utils.training import train, save_checkpoint, metrics_evaluate
from utils.custom_loss import LogitLoss, BoundedLogitLoss, NegativeCrossEntropy, BoundedLogitLossFixedRef, BoundedLogitLoss_neg
def parse_arguments():
parser = argparse.ArgumentParser(description='Trains a UAP')
# pretrained
parser.add_argument('--dataset', default='imagenet', choices=['cifar10', 'cifar100', 'imagenet', 'coco', 'voc', 'places365'],
help='Used dataset to generate UAP (default: imagenet)')
parser.add_argument('--pretrained_dataset', default='imagenet', choices=['cifar10', 'cifar100', 'imagenet'],
help='Used dataset to train the initial model (default: imagenet)')
parser.add_argument('--pretrained_arch', default='vgg16', choices=['vgg16_cifar', 'vgg19_cifar', 'resnet20', 'resnet56',
'alexnet', 'googlenet', 'vgg16', 'vgg19',
'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
'inception_v3'],
help='Used model architecture: (default: vgg16)')
parser.add_argument('--pretrained_seed', type=int, default=123,
help='Seed used in the generation process (default: 123)')
# Parameters regarding UAP
parser.add_argument('--epsilon', type=float, default=0.03922,
help='Norm restriction of UAP (default: 10/255)')
parser.add_argument('--num_iterations', type=int, default=2000,
help='Number of iterations (default: 2000)')
parser.add_argument('--result_subfolder', default='default', type=str,
help='result subfolder name')
parser.add_argument('--postfix', default='',
help='Postfix to attach to result folder')
# Optimization options
parser.add_argument('--loss_function', default='ce', choices=['ce', 'neg_ce', 'logit', 'bounded_logit',
'bounded_logit_fixed_ref', 'bounded_logit_neg'],
help='Used loss function for source classes: (default: cw_logit)')
parser.add_argument('--confidence', default=0., type=float,
help='Confidence value for C&W losses (default: 0.0)')
parser.add_argument('--targeted', action='store_true',
help='Target a specific class (default: False)')
parser.add_argument('--target_class', type=int, default=0,
help='Target class (default: 0)')
parser.add_argument('--batch_size', type=int, default=32,
help='Batch size (default: 32)')
parser.add_argument('--learning_rate', type=float, default=0.001,
help='Learning Rate (default: 0.001)')
parser.add_argument('--print_freq', default=200, type=int, metavar='N',
help='print frequency (default: 200)')
parser.add_argument('--ngpu', type=int, default=1,
help='Number of used GPUs (0 = CPU) (default: 1)')
parser.add_argument('--workers', type=int, default=6,
help='Number of data loading workers (default: 6)')
args = parser.parse_args()
args.use_cuda = args.ngpu>0 and torch.cuda.is_available()
if args.pretrained_seed is None:
args.pretrained_seed = random.randint(1, 10000)
return args
def _log_param_counts(network, name, log):
    """Log trainable / non-trainable / total parameter counts of ``network``."""
    print_log("{} Trainable parameters: {}".format(name, get_num_trainable_parameters(network)), log)
    print_log("{} Non Trainable parameters: {}".format(name, get_num_non_trainable_parameters(network)), log)
    print_log("{} Total # parameters: {}".format(name, get_num_parameters(network)), log)


def _build_criterion(args, num_classes):
    """Return the loss instance selected by ``args.loss_function``.

    Raises:
        ValueError: if the loss name is not recognized.
    """
    if args.loss_function == "ce":
        return torch.nn.CrossEntropyLoss()
    if args.loss_function == "neg_ce":
        return NegativeCrossEntropy()
    if args.loss_function == "logit":
        return LogitLoss(num_classes=num_classes, use_cuda=args.use_cuda)
    if args.loss_function == "bounded_logit":
        return BoundedLogitLoss(num_classes=num_classes, confidence=args.confidence, use_cuda=args.use_cuda)
    if args.loss_function == "bounded_logit_fixed_ref":
        return BoundedLogitLossFixedRef(num_classes=num_classes, confidence=args.confidence, use_cuda=args.use_cuda)
    if args.loss_function == "bounded_logit_neg":
        return BoundedLogitLoss_neg(num_classes=num_classes, confidence=args.confidence, use_cuda=args.use_cuda)
    raise ValueError("Unknown loss function: {}".format(args.loss_function))


def main():
    """Train a Universal Adversarial Perturbation (UAP) against a pretrained model.

    Pipeline: parse args, seed all RNGs, build data loaders, load the frozen
    target network, prepend a trainable UAP generator, train the perturbation,
    evaluate it, and save a checkpoint of the generator.
    """
    args = parse_arguments()

    # Seed every RNG in play for reproducibility (numpy was previously
    # imported but never seeded).
    random.seed(args.pretrained_seed)
    np.random.seed(args.pretrained_seed)
    torch.manual_seed(args.pretrained_seed)
    if args.use_cuda:
        torch.cuda.manual_seed_all(args.pretrained_seed)
    cudnn.benchmark = True

    # Get the result path to store the results
    result_path = get_result_path(dataset_name=args.dataset,
                                  network_arch=args.pretrained_arch,
                                  random_seed=args.pretrained_seed,
                                  result_subfolder=args.result_subfolder,
                                  postfix=args.postfix)

    # Init logger
    log_file_name = os.path.join(result_path, 'log.txt')
    print("Log file: {}".format(log_file_name))
    log = open(log_file_name, 'w')
    try:
        print_log('save path : {}'.format(result_path), log)
        state = {k: v for k, v in args._get_kwargs()}
        for key, value in state.items():
            print_log("{} : {}".format(key, value), log)
        print_log("Random Seed: {}".format(args.pretrained_seed), log)
        print_log("Python version : {}".format(sys.version.replace('\n', ' ')), log)
        print_log("Torch version : {}".format(torch.__version__), log)
        print_log("Cudnn version : {}".format(torch.backends.cudnn.version()), log)

        # Test loader on the pretrained dataset, used for final evaluation.
        _, pretrained_data_test = get_data(args.pretrained_dataset, args.pretrained_dataset)
        pretrained_data_test_loader = torch.utils.data.DataLoader(pretrained_data_test,
                                                                  batch_size=args.batch_size,
                                                                  shuffle=False,
                                                                  num_workers=args.workers,
                                                                  pin_memory=True)

        ##### Dataloader for training ####
        num_classes, (mean, std), input_size, num_channels = get_data_specs(args.pretrained_dataset)
        data_train, _ = get_data(args.dataset, args.pretrained_dataset)
        data_train_loader = torch.utils.data.DataLoader(data_train,
                                                        batch_size=args.batch_size,
                                                        shuffle=True,
                                                        num_workers=args.workers,
                                                        pin_memory=True)

        ####################################
        # Init model, criterion, and optimizer
        print_log("=> Creating model '{}'".format(args.pretrained_arch), log)
        # get a path for loading the model to be attacked
        model_path = get_model_path(dataset_name=args.pretrained_dataset,
                                    network_arch=args.pretrained_arch,
                                    random_seed=args.pretrained_seed)
        model_weights_path = os.path.join(model_path, "checkpoint.pth.tar")

        target_network = get_network(args.pretrained_arch,
                                     input_size=input_size,
                                     num_classes=num_classes,
                                     finetune=False)
        print_log("=> Network :\n {}".format(target_network), log)
        target_network = torch.nn.DataParallel(target_network, device_ids=list(range(args.ngpu)))
        # Set the target model into evaluation mode
        target_network.eval()

        # Imagenet models use the pretrained pytorch weights; everything else
        # loads a local checkpoint. map_location keeps this working on
        # CPU-only machines when the checkpoint was saved from GPU.
        if args.pretrained_dataset != "imagenet":
            map_location = None if args.use_cuda else 'cpu'
            network_data = torch.load(model_weights_path, map_location=map_location)
            target_network.load_state_dict(network_data['state_dict'])

        # Freeze the target model: only the UAP generator is trained.
        set_parameter_requires_grad(target_network, requires_grad=False)
        _log_param_counts(target_network, "Target Network", log)

        print_log("=> Inserting Generator", log)
        generator = UAP(shape=(input_size, input_size),
                        num_channels=num_channels,
                        mean=mean,
                        std=std,
                        use_cuda=args.use_cuda)
        print_log("=> Generator :\n {}".format(generator), log)
        _log_param_counts(generator, "Generator", log)

        # Chain generator -> target so the perturbation is applied before inference.
        perturbed_net = nn.Sequential(OrderedDict([('generator', generator), ('target_model', target_network)]))
        perturbed_net = torch.nn.DataParallel(perturbed_net, device_ids=list(range(args.ngpu)))
        _log_param_counts(perturbed_net, "Perturbed Net", log)

        # Set the target model into evaluation mode; only the generator trains.
        perturbed_net.module.target_model.eval()
        perturbed_net.module.generator.train()

        criterion = _build_criterion(args, num_classes)

        if args.use_cuda:
            target_network.cuda()
            generator.cuda()
            perturbed_net.cuda()
            criterion.cuda()

        optimizer = torch.optim.Adam(perturbed_net.parameters(), lr=state['learning_rate'])

        # Measure the time needed for the UAP generation
        start = time.time()
        train(data_loader=data_train_loader,
              model=perturbed_net,
              criterion=criterion,
              optimizer=optimizer,
              epsilon=args.epsilon,
              num_iterations=args.num_iterations,
              targeted=args.targeted,
              target_class=args.target_class,
              log=log,
              print_freq=args.print_freq,
              use_cuda=args.use_cuda)
        end = time.time()
        print_log("Time needed for UAP generation: {}".format(end - start), log)

        # evaluate
        print_log("Final evaluation:", log)
        metrics_evaluate(data_loader=pretrained_data_test_loader,
                         target_model=target_network,
                         perturbed_model=perturbed_net,
                         targeted=args.targeted,
                         target_class=args.target_class,
                         log=log,
                         use_cuda=args.use_cuda)

        # Only the generator weights are saved; the target model is unchanged.
        save_checkpoint({
            'arch': args.pretrained_arch,
            'state_dict': perturbed_net.module.generator.state_dict(),
            'optimizer': optimizer.state_dict(),
            'args': copy.deepcopy(args),
        }, result_path, 'checkpoint.pth.tar')
    finally:
        # Close the log even if training/evaluation raises.
        log.close()
# Script entry point: run UAP training only when executed directly.
if __name__ == '__main__':
    main()