# Make sure to first download model_best_dense.pth.tar from
# https://www.dropbox.com/sh/56yyfy16elwbnr8/AADmr7bXgFkrNdoHjKWwIFKqa?dl=0
import os
import sys
import math
import argparse

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import torchvision.datasets as datasets
import torchvision.transforms as transforms

# Make the parent directory importable (presumably where the autoattack
# package lives; see `from autoattack import AutoAttack` below).
sys.path.insert(0, "..")


class BasicBlock(nn.Module):
    """Pre-activation residual block: BN -> ReLU -> Conv, twice, plus a shortcut."""

    def __init__(self, conv_layer, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = conv_layer(
            in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = conv_layer(
            out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.droprate = dropRate
        self.equalInOut = in_planes == out_planes
        # 1x1 convolution on the shortcut path when the channel count or
        # spatial resolution changes; identity otherwise.
        self.convShortcut = (
            conv_layer(
                in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False
            )
            if not self.equalInOut
            else None
        )

    def forward(self, x):
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)


class NetworkBlock(nn.Module):
    """A stack of `nb_layers` BasicBlocks; only the first block may downsample."""

    def __init__(
        self, nb_layers, in_planes, out_planes, block, conv_layer, stride, dropRate=0.0
    ):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(
            conv_layer, block, in_planes, out_planes, nb_layers, stride, dropRate
        )

    def _make_layer(
        self, conv_layer, block, in_planes, out_planes, nb_layers, stride, dropRate
    ):
        layers = []
        for i in range(int(nb_layers)):
            layers.append(
                block(
                    conv_layer,
                    in_planes if i == 0 else out_planes,
                    out_planes,
                    stride if i == 0 else 1,
                    dropRate,
                )
            )
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)


class WideResNet(nn.Module):
    """Wide residual network; depth must satisfy depth = 6n + 4 so that each
    of the three groups gets n blocks."""

    def __init__(
        self,
        conv_layer,
        linear_layer,
        depth=34,
        num_classes=10,
        widen_factor=10,
        dropRate=0.0,
    ):
        super(WideResNet, self).__init__()
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert (depth - 4) % 6 == 0
        n = (depth - 4) // 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = conv_layer(
            3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False
        )
        # 1st block
        self.block1 = NetworkBlock(
            n, nChannels[0], nChannels[1], block, conv_layer, 1, dropRate
        )
        # 1st sub-block (not used in forward; presumably kept so checkpoints
        # saved with this parameter layout load without key mismatches)
        self.sub_block1 = NetworkBlock(
            n, nChannels[0], nChannels[1], block, conv_layer, 1, dropRate
        )
        # 2nd block (downsamples by 2)
        self.block2 = NetworkBlock(
            n, nChannels[1], nChannels[2], block, conv_layer, 2, dropRate
        )
        # 3rd block (downsamples by 2)
        self.block3 = NetworkBlock(
            n, nChannels[2], nChannels[3], block, conv_layer, 2, dropRate
        )
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = linear_layer(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He-style initialization for convolutions; unit scale for BatchNorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, linear_layer):
                m.bias.data.zero_()

    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return self.fc(out)
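
# For concreteness (derived from the constructor above): with depth=28 and
# widen_factor=10, n = (28 - 4) // 6 = 4 blocks per group, and the three
# groups have widths 160, 320, 640 with strides 1, 2, 2 respectively.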


def wrn_28_10(**kwargs):
    return WideResNet(nn.Conv2d, nn.Linear, depth=28, widen_factor=10, **kwargs)
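
# A quick shape sanity check (a hypothetical snippet, not part of the
# original gist): a WRN-28-10 maps a CIFAR-10 sized batch to 10 logits.
#
#   model = wrn_28_10(num_classes=10)
#   logits = model(torch.randn(2, 3, 32, 32))
#   assert logits.shape == (2, 10)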


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", type=str, default="./data")
    parser.add_argument("--norm", type=str, default="Linf")
    parser.add_argument("--epsilon", type=float, default=8.0 / 255.0)
    parser.add_argument("--model", type=str, default="./model_test.pt")
    parser.add_argument("--n_ex", type=int, default=1000)
    parser.add_argument("--individual", action="store_true")
    parser.add_argument("--cheap", action="store_true")
    parser.add_argument("--save_dir", type=str, default="./results")
    parser.add_argument("--batch_size", type=int, default=500)
    parser.add_argument("--plus", action="store_true")
    args = parser.parse_args()

    # load model (wrapped in nn.DataParallel to match the "module."-prefixed
    # keys of the saved state dict)
    model = nn.DataParallel(wrn_28_10())
    ckpt = torch.load(args.model, map_location="cpu")["state_dict"]
    model.load_state_dict(ckpt)
    model.cuda()
    model.eval()

    # load data (no normalization transform: AutoAttack expects inputs in [0, 1])
    transform_chain = transforms.Compose([transforms.ToTensor()])
    item = datasets.CIFAR10(
        root=args.data_dir, train=False, transform=transform_chain, download=True
    )
    test_loader = data.DataLoader(item, batch_size=1000, shuffle=False, num_workers=0)

    # create save dir
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    # load attack
    from autoattack import AutoAttack

    adversary = AutoAttack(model, norm=args.norm, eps=args.epsilon)

    # gather the whole test set into memory in a single pass over the loader
    batches = [(x, y) for (x, y) in test_loader]
    x_test = torch.cat([x for (x, _) in batches], 0)
    y_test = torch.cat([y for (_, y) in batches], 0)

    # cheap version
    if args.cheap:
        adversary.cheap()

    # plus version
    if args.plus:
        adversary.plus = True

    # run attack and save images
    with torch.no_grad():
        if not args.individual:
            adv_complete = adversary.run_standard_evaluation(
                x_test[: args.n_ex], y_test[: args.n_ex], bs=args.batch_size
            )
            torch.save(
                {"adv_complete": adv_complete},
                "{}/{}_1_{}_eps_{:.5f}_plus_{}_cheap_{}.pth".format(
                    args.save_dir,
                    "aa",
                    adv_complete.shape[0],
                    args.epsilon,
                    args.plus,
                    args.cheap,
                ),
            )
        else:
            # individual version: each attack is run on all test points;
            # specify the attacks to run with, e.g.,
            # adversary.attacks_to_run = ['apgd-ce']
            adv_complete = adversary.run_standard_evaluation_individual(
                x_test[: args.n_ex], y_test[: args.n_ex], bs=args.batch_size
            )
            torch.save(
                adv_complete,
                "{}/{}_individual_1_{}_eps_{:.5f}_plus_{}_cheap_{}.pth".format(
                    args.save_dir, "aa", args.n_ex, args.epsilon, args.plus, args.cheap
                ),
            )
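
# Example invocation (the script filename is an assumption; the checkpoint
# name comes from the download note at the top, and only the flags defined
# above are used):
#   python eval_cifar10_autoattack.py --model ./model_best_dense.pth.tar \
#       --n_ex 1000 --batch_size 500 --cheap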