Skip to content

Commit

Permalink
DeepPoly ReLU and FullyConnected
Browse files Browse the repository at this point in the history
  • Loading branch information
MekAkUActOR committed Nov 21, 2022
1 parent f4ca78e commit 1b1f55d
Show file tree
Hide file tree
Showing 3 changed files with 187 additions and 148 deletions.
9 changes: 7 additions & 2 deletions code/deeppoly.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,8 @@ def __init__(self, in_features):
super(DPReLU, self).__init__()
self.in_features = in_features
self.out_features = in_features
self.alpha = torch.nn.Parameter(torch.ones(in_features) * 0.5)
self.alpha = torch.nn.Parameter(torch.ones(in_features) * 0.3)
self.alpha.requires_grad = True

def forward(self, x):
x.save()
Expand All @@ -107,6 +108,7 @@ def forward(self, x):
'''
low < 0 < up
'''
# print("ALPHA: ", self.alpha)
slope_low_3 = torch.tan(self.alpha)
bias_low_3 = slope_low_3 * low - slope_low_3 * low
slope_up_3 = (F.relu(up) - F.relu(low)) / (up - low)
Expand All @@ -127,7 +129,7 @@ def forward(self, x):

class DPLinear(nn.Module):
def __init__(self, nested: nn.Linear):
super().__init__()
super(DPLinear, self).__init__()
self.weight = nested.weight.detach()
self.bias = nested.bias.detach()
self.in_features = nested.in_features
Expand All @@ -148,3 +150,6 @@ def forward(self, x):
return x


class DPConv(nn.Module):
    """DeepPoly transformer for a Conv2d layer (skeleton only).

    NOTE(review): `nested` is accepted but not stored or used — the
    transformer body has not been written in this revision.
    """

    def __init__(self, nested: nn.Conv2d):
        super().__init__()
10 changes: 5 additions & 5 deletions code/evaluate
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
#!/bin/bash
# Run verifier.py on every spec of networks net1..net3 and append one
# CSV line (network,spec,result) per case to ../examples/res.txt.

# -f: do not fail when the result file does not exist yet.
rm -f ../examples/res.txt
for net in {1..3}
do
    echo Evaluating network net${net}...
    for spec in `ls ../examples/net${net}/`
    do
        echo ${spec}
        res=$(python verifier.py --net net${net} --spec ../examples/net${net}/${spec})
        # NOTE(review): the original wrote "net${k}_${net}" but ${k} is
        # never defined anywhere; the empty expansion produced lines like
        # "net_1,..." — the stray variable is dropped here.
        echo net${net},$spec,$res >> ../examples/res.txt
    done
done
316 changes: 175 additions & 141 deletions code/verifier.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,131 +10,131 @@

DEVICE = 'cpu'
DTYPE = torch.float32
LR = 0.005
num_iter = 1000


class DeepPoly:
    """Abstract element pairing concrete interval bounds with symbolic
    (affine) bounds, as used by the DeepPoly abstract domain."""

    def __init__(self, lb, ub, lexpr, uexpr) -> None:
        """
        Arguments
        ---------
        lb : concrete lower bound
        ub : concrete upper bound
        lexpr : symbolic lower bound, pair (weights, bias), or None
        uexpr : symbolic upper bound, pair (weights, bias), or None
        """
        self.lb = lb
        self.ub = ub
        self.lexpr = lexpr
        self.uexpr = uexpr
        # TODO: remove asserts to improve speed
        assert not torch.isnan(lb).any()
        assert not torch.isnan(ub).any()
        for expr in (lexpr, uexpr):
            assert expr is None or not (
                torch.isnan(expr[0]).any() or torch.isnan(expr[1]).any()
            )
        # Number of neurons in this layer (size of the leading dimension).
        self.dim = lb.size()[0]

    @staticmethod
    def deeppoly_from_perturbation(x, eps):
        """Build the input element for an L-infinity ball of radius eps."""
        assert eps >= 0, "epsilon must not be negative value"
        # We only have to verify images with pixel intensities in [0, 1],
        # e.g. for eps = 0.2 and pixel 0.9 the verified range is
        # [0.7, 1.0] rather than [0.7, 1.1].
        lower = (x - eps).clamp(min=0)
        upper = (x + eps).clamp(max=1)
        return DeepPoly(lower, upper, None, None)


class DPBackSubstitution:
    """Mixin implementing DeepPoly back-substitution through earlier layers.

    Users of this mixin are expected to set (elsewhere in the file):
      - self.output_dp : DeepPoly element of this layer's output
      - self.input_dp  : DeepPoly element of the network input (first layer)
      - self.prev_layer: preceding transformer, or None for the first layer
    """

    def __init__(self) -> None:
        pass

    def _get_lb(self, expr_w, expr_b):
        """Propagate a lower-bound affine expression one layer backwards.

        expr_w, expr_b : weights and bias of the expression being bounded.
        Returns a concrete lower bound once the input layer is reached.
        """
        # 2-D symbolic bounds (affine layers) compose via matmul; 1-D
        # per-neuron bounds (activation layers) broadcast elementwise.
        if len(self.output_dp.lexpr[0].size()) == 2:
            res_w = (
                positive_only(expr_w) @ self.output_dp.lexpr[0]
                + negative_only(expr_w) @ self.output_dp.uexpr[0]
            )
        else:
            res_w = (
                positive_only(expr_w) * self.output_dp.lexpr[0]
                + negative_only(expr_w) * self.output_dp.uexpr[0]
            )
        res_b = (
            positive_only(expr_w) @ self.output_dp.lexpr[1]
            + negative_only(expr_w) @ self.output_dp.uexpr[1]
            + expr_b
        )

        # `is None` (identity) instead of `== None` (equality): idiomatic
        # and avoids invoking a custom __eq__.
        if self.prev_layer is None:
            # Reached the input layer: concretize against the input box.
            return (
                positive_only(res_w) @ self.input_dp.lb
                + negative_only(res_w) @ self.input_dp.ub
                + res_b
            )
        else:
            return self.prev_layer._get_lb(res_w, res_b)

    def _get_ub(self, expr_w, expr_b):
        """Mirror of _get_lb with lower/upper roles swapped.

        Returns a concrete upper bound once the input layer is reached.
        """
        if len(self.output_dp.lexpr[0].size()) == 2:
            res_w = (
                positive_only(expr_w) @ self.output_dp.uexpr[0]
                + negative_only(expr_w) @ self.output_dp.lexpr[0]
            )
        else:
            res_w = (
                positive_only(expr_w) * self.output_dp.uexpr[0]
                + negative_only(expr_w) * self.output_dp.lexpr[0]
            )
        res_b = (
            positive_only(expr_w) @ self.output_dp.uexpr[1]
            + negative_only(expr_w) @ self.output_dp.lexpr[1]
            + expr_b
        )

        if self.prev_layer is None:
            return (
                positive_only(res_w) @ self.input_dp.ub
                + negative_only(res_w) @ self.input_dp.lb
                + res_b
            )
        else:
            return self.prev_layer._get_ub(res_w, res_b)
class DPReLU(torch.nn.Module, DPBackSubstitution):
    """DeepPoly transformer for ReLU layer (not yet implemented)."""

    def __init__(self) -> None:
        super().__init__()

    def forward(self):
        # Placeholder: transformer logic not written in this revision.
        pass

class DPLinear(torch.nn.Module, DPBackSubstitution):
    """DeepPoly transformer for affine layer (not yet implemented)."""

    def __init__(self) -> None:
        super(DPLinear, self).__init__()

    def forward(self):
        # Placeholder: transformer logic not written in this revision.
        pass


def negative_only(w):
    """Return w with every positive entry replaced by zero."""
    return w.clamp(max=0)


def positive_only(w):
    """Return w with every negative entry replaced by zero."""
    return w.clamp(min=0)
LR = 0.02
num_iter = 100


# class DeepPoly:
# def __init__(self, lb, ub, lexpr, uexpr) -> None:
# """
# Arguments
# ---------
# lb : concrete lower bound
# ub: concrete upper bound
# lexpr: symbolic lower bound
# uexpr: symbolic upper bound
# """
# self.lb = lb
# self.ub = ub
# self.lexpr = lexpr
# self.uexpr = uexpr
# # TODO: remove asserts to improve speed
# assert not torch.isnan(self.lb).any()
# assert not torch.isnan(self.ub).any()
# assert lexpr is None or (
# (not torch.isnan(self.lexpr[0]).any())
# and (not torch.isnan(self.lexpr[1]).any())
# )
# assert uexpr is None or (
# (not torch.isnan(self.uexpr[0]).any())
# and (not torch.isnan(self.uexpr[1]).any())
# )
# # TODO: what is dim here
# self.dim = lb.size()[0]
#
# @staticmethod
# def deeppoly_from_perturbation(x, eps):
# assert eps >= 0, "epsilon must not be negative value"
# # We only have to verify images with pixel intensities between 0 and 1.
# # e.g. if perturbation is 0.2 and pixel value is 0.9
# # we verify range of [0.7, 1.0], instead of [0.7, 1.1]
# lb = x - eps
# ub = x + eps
# lb[lb < 0] = 0
# ub[ub > 1] = 1
# return DeepPoly(lb, ub, None, None)
#
#
#
# class DPBackSubstitution:
# def __init__(self) -> None:
# pass
#
# def _get_lb(self, expr_w, expr_b):
# if len(self.output_dp.lexpr[0].size()) == 2:
# res_w = (
# positive_only(expr_w) @ self.output_dp.lexpr[0]
# + negative_only(expr_w) @ self.output_dp.uexpr[0]
# )
# else:
# res_w = (
# positive_only(expr_w) * self.output_dp.lexpr[0]
# + negative_only(expr_w) * self.output_dp.uexpr[0]
# )
# res_b = (
# positive_only(expr_w) @ self.output_dp.lexpr[1]
# + negative_only(expr_w) @ self.output_dp.uexpr[1]
# + expr_b
# )
#
# if self.prev_layer == None:
# return (
# positive_only(res_w) @ self.input_dp.lb
# + negative_only(res_w) @ self.input_dp.ub
# + res_b
# )
# else:
# return self.prev_layer._get_lb(res_w, res_b)
#
# def _get_ub(self, expr_w, expr_b):
# if len(self.output_dp.lexpr[0].size()) == 2:
# res_w = (
# positive_only(expr_w) @ self.output_dp.uexpr[0]
# + negative_only(expr_w) @ self.output_dp.lexpr[0]
# )
# else:
# res_w = (
# positive_only(expr_w) * self.output_dp.uexpr[0]
# + negative_only(expr_w) * self.output_dp.lexpr[0]
# )
# res_b = (
# positive_only(expr_w) @ self.output_dp.uexpr[1]
# + negative_only(expr_w) @ self.output_dp.lexpr[1]
# + expr_b
# )
#
# if self.prev_layer == None:
# return (
# positive_only(res_w) @ self.input_dp.ub
# + negative_only(res_w) @ self.input_dp.lb
# + res_b
# )
# else:
# return self.prev_layer._get_ub(res_w, res_b)
#
# class DPReLU(torch.nn.Module, DPBackSubstitution):
# """ DeepPoly transformer for ReLU layer """
# def __init__(self) -> None:
# super(DPReLU, self).__init__()
#
# def forward(self):
# pass
#
# class DPLinear(torch.nn.Module, DPBackSubstitution):
# """ DeepPoly transformer for affine layer """
# def __init__(self) -> None:
# super().__init__()
#
# def forward(self):
# pass
#
#
# def negative_only(w):
# return -torch.relu(-w)
#
#
# def positive_only(w):
# return torch.relu(w)


def transform_image(pixel_values, input_dim):
Expand Down Expand Up @@ -206,24 +206,59 @@ def nomalize(value, inputs):
def analyze(net, inputs, eps, true_label):
    """Try to verify that `net` classifies every point in the eps-ball
    around `inputs` as `true_label`.

    Runs several restarts: each restart initializes every learnable ReLU
    slope parameter to a fixed alpha in {0.1, ..., 0.7} and then refines
    it with Adam for up to `num_iter` steps.  Returns True as soon as one
    restart verifies, False if all restarts fail.

    NOTE(review): this function previously contained interleaved old/new
    lines from a diff (a single-pass version and the restart version);
    only the restart version is kept here.
    """
    low_bound = nomalize((inputs - eps).clamp(0, 1), inputs)
    up_bound = nomalize((inputs + eps).clamp(0, 1), inputs)

    for alpha_tenths in range(1, 8):
        alpha = alpha_tenths / 10
        verifiable_net = verifiable(net, inputs)
        optimizer = optim.Adam(verifiable_net.parameters(), lr=LR)
        # clamp_(alpha, alpha) pins every learnable slope to the current
        # restart value before optimization begins.
        for p in verifiable_net.parameters():
            if p.requires_grad:
                p.data.clamp_(alpha, alpha)

        for _ in range(num_iter):
            optimizer.zero_grad()
            verifier_output = verifiable_net(
                DeepPoly(low_bound.shape[0], low_bound, up_bound)
            )
            res = verifier_output.compute_verify_result(true_label)
            if (res > 0).all():
                return True
            # Penalize only the still-unverified output differences.
            loss = torch.log(-res[res < 0]).max()
            loss.backward()
            optimizer.step()
            # Keep slopes in [0, pi/4] so tan(alpha) stays within [0, 1]
            # — TODO confirm against the DPReLU transformer's use of tan.
            for p in verifiable_net.parameters():
                if p.requires_grad:
                    p.data.clamp_(0, 0.7854)

        # Final check for this restart before moving to the next alpha.
        verifier_output = verifiable_net(
            DeepPoly(low_bound.shape[0], low_bound, up_bound)
        )
        res = verifier_output.compute_verify_result(true_label)
        if (res > 0).all():
            return True

    return False
####################################### new added

# verifiable_net = verifiable(net, inputs)
# optimizer = optim.Adam(verifiable_net.parameters(), lr=LR)
# for i in range(num_iter):
# optimizer.zero_grad()
# verifier_output = verifiable_net(DeepPoly(low_bound.shape[0], low_bound, up_bound))
# res = verifier_output.compute_verify_result(true_label)
# if (res > 0).all():
# break
# loss = torch.log(-res[res < 0]).max()
# loss.backward()
# optimizer.step()
# for p in verifiable_net.parameters():
# if p.requires_grad:
# p.data.clamp_(0, 0.7854)
#
# verifier_output = verifiable_net(DeepPoly(low_bound.shape[0], low_bound, up_bound))
# res = verifier_output.compute_verify_result(true_label)
# if (res > 0).all():
# return True
# else:
# return False


def main():
Expand All @@ -237,7 +272,6 @@ def main():

inputs, true_label, eps = get_spec(args.spec, dataset)
net = get_net(args.net, net_name)
print("net: ", net)

outs = net(inputs)
pred_label = outs.max(dim=1)[1].item()
Expand Down

0 comments on commit 1b1f55d

Please sign in to comment.