fab_tf.py
# Copyright (c) 2019-present, Francesco Croce
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import torch

from fab_base import FABAttack


class FABAttack_TF(FABAttack):
    """
    Fast Adaptive Boundary Attack (Linf, L2, L1)
    https://arxiv.org/abs/1907.02044

    :param model:       TF2 model wrapper (see the interface sketch at the
                        end of the file)
    :param norm:        Lp-norm to minimize ('Linf', 'L2', 'L1' supported)
    :param n_restarts:  number of random restarts
    :param n_iter:      number of iterations
    :param eps:         epsilon for the random restarts
    :param alpha_max:   upper bound on the interpolation weight alpha used to
                        bias the step towards the original point
    :param eta:         overshooting factor (>= 1) applied to the projection step
    :param beta:        size of the backward step towards the original point
    """

    def __init__(
            self,
            model,
            norm='Linf',
            n_restarts=1,
            n_iter=100,
            eps=None,
            alpha_max=0.1,
            eta=1.05,
            beta=0.9,
            loss_fn=None,
            verbose=False,
            seed=0,
            targeted=False,
            device=None,
            n_target_classes=9):
        """ FAB-attack implementation in TF2 """
        self.model = model
        super().__init__(norm,
                         n_restarts,
                         n_iter,
                         eps,
                         alpha_max,
                         eta,
                         beta,
                         loss_fn,
                         verbose,
                         seed,
                         targeted,
                         device,
                         n_target_classes)

    def _predict_fn(self, x):
        # Forward pass through the wrapped TF2 model; returns the logits.
        return self.model.predict(x)

    def _get_predicted_label(self, x):
        with torch.no_grad():
            outputs = self._predict_fn(x)
        _, y = torch.max(outputs, dim=1)
        return y

    def get_diff_logits_grads_batch(self, imgs, la):
        # Logits and their input gradients for every class, from the wrapper.
        y2, g2 = self.model.grad_logits(imgs)
        # Differences w.r.t. the currently predicted class la:
        # df[i, j] = f_j(x_i) - f_la(x_i), dg[i, j] = grad f_j(x_i) - grad f_la(x_i).
        df = y2 - y2[torch.arange(imgs.shape[0]), la].unsqueeze(1)
        dg = g2 - g2[torch.arange(imgs.shape[0]), la].unsqueeze(1)
        # Mask the la-entry with a large value so the predicted class itself
        # is never selected when searching for the closest decision boundary.
        df[torch.arange(imgs.shape[0]), la] = 1e10

        return df, dg
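
    # Note (added for illustration): in the notation of the FAB paper
    # (arXiv:1907.02044), the base attack uses df and dg to pick the decision
    # boundary that is closest under a local linearization; for each class
    # j != la the linearized distance of x to the boundary
    # {z : f_j(z) = f_la(z)} is
    #     |df[:, j]| / ||dg[:, j]||_q,   with 1/p + 1/q = 1,
    # and the iterate is projected onto the hyperplane minimizing this ratio.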

    def get_diff_logits_grads_batch_targeted(self, imgs, la, la_target):
        # The wrapper computes the single logit difference and its input
        # gradient for the chosen target class directly.
        df, dg = self.model.get_grad_diff_logits_target(imgs, la, la_target)
        # Add a class dimension so the shapes match the untargeted case.
        df.unsqueeze_(1)
        dg.unsqueeze_(1)

        return df, dg
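

# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original file, and
# every name below is hypothetical). FABAttack_TF only assumes that `model`
# exposes the three methods called above: predict(x), grad_logits(x) and
# get_grad_diff_logits_target(x, la, la_target), all operating on torch
# tensors. The toy adapter below satisfies that interface with a small
# PyTorch linear model so the sketch stays self-contained; in the real code
# base this role is played by a TF2 model wrapper.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import torch.nn as nn

    class _ToyAdapter:
        """Hypothetical stand-in for the TF2 wrapper expected by FABAttack_TF."""

        def __init__(self, n_feats=4, n_classes=3):
            self.linear = nn.Linear(n_feats, n_classes)

        def predict(self, x):
            # Logits of shape [B, C].
            return self.linear(x)

        def grad_logits(self, x):
            # Logits plus per-class input gradients of shape [B, C, *x.shape[1:]].
            x = x.clone().requires_grad_(True)
            logits = self.linear(x)
            grads = torch.stack([
                torch.autograd.grad(logits[:, c].sum(), x, retain_graph=True)[0]
                for c in range(logits.shape[1])], dim=1)
            return logits.detach(), grads

        def get_grad_diff_logits_target(self, x, la, la_target):
            # Assumed sign convention: f_target - f_la.
            logits, grads = self.grad_logits(x)
            u = torch.arange(x.shape[0])
            df = logits[u, la_target] - logits[u, la]
            dg = grads[u, la_target] - grads[u, la]
            return df, dg

    # Assuming fab_base.FABAttack's constructor only stores the
    # hyperparameters, the linearization quantities can be inspected directly:
    torch.manual_seed(0)
    adapter = _ToyAdapter()
    attack = FABAttack_TF(adapter, norm='Linf', n_iter=10)
    x = torch.randn(2, 4)
    la = attack._get_predicted_label(x)
    df, dg = attack.get_diff_logits_grads_batch(x, la)
    print(df.shape, dg.shape)  # -> torch.Size([2, 3]) torch.Size([2, 3, 4])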