utils.py
import os

import torch
from torch import autograd, nn

class Concat_embed(nn.Module):
    """Projects a sentence embedding and tiles it over a 4x4 feature map so it
    can be concatenated with the discriminator's spatial features."""

    def __init__(self, embed_dim, projected_embed_dim):
        super(Concat_embed, self).__init__()
        self.projection = nn.Sequential(
            nn.Linear(in_features=embed_dim, out_features=projected_embed_dim),
            nn.BatchNorm1d(num_features=projected_embed_dim),
            nn.LeakyReLU(negative_slope=0.2, inplace=True)
        )

    def forward(self, inp, embed):
        # (B, projected_embed_dim)
        projected_embed = self.projection(embed)
        # Tile to (4, 4, B, D), then permute to (B, D, 4, 4) to match `inp`.
        replicated_embed = projected_embed.repeat(4, 4, 1, 1).permute(2, 3, 0, 1)
        # Concatenate along the channel dimension.
        hidden_concat = torch.cat([inp, replicated_embed], 1)
        return hidden_concat
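
# Usage sketch (illustrative, not part of the original file): the concrete
# sizes below (512 feature channels, 1024-dim embeddings) are assumptions,
# chosen to match a typical 64x64 text-to-image discriminator whose final
# conv features are (B, 512, 4, 4).
#
#   concat = Concat_embed(embed_dim=1024, projected_embed_dim=128)
#   feats = torch.randn(8, 512, 4, 4)
#   embed = torch.randn(8, 1024)
#   out = concat(feats, embed)   # -> (8, 512 + 128, 4, 4)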

class minibatch_discriminator(nn.Module):
    """Minibatch discrimination (Salimans et al., 2016): appends per-sample
    similarity statistics so the discriminator can detect mode collapse."""

    def __init__(self, num_channels, B_dim, C_dim):
        super(minibatch_discriminator, self).__init__()
        self.B_dim = B_dim
        self.C_dim = C_dim
        self.num_channels = num_channels
        # Learned tensor T mapping flattened features to B_dim x C_dim rows.
        T_init = torch.randn(num_channels * 4 * 4, B_dim * C_dim) * 0.1
        self.T_tensor = nn.Parameter(T_init, requires_grad=True)

    def forward(self, inp):
        # Flatten the (N, C, 4, 4) feature map to (N, C * 4 * 4).
        inp = inp.view(-1, self.num_channels * 4 * 4)
        M = inp.mm(self.T_tensor)
        M = M.view(-1, self.B_dim, self.C_dim)

        # Pairwise L1 distances across the minibatch: op1 is (N, B, C, 1),
        # op2 is (1, B, C, N); broadcasting the difference gives (N, B, C, N).
        op1 = M.unsqueeze(3)
        op2 = M.permute(1, 2, 0).unsqueeze(0)
        output = torch.sum(torch.abs(op1 - op2), 2)   # (N, B, N)
        output = torch.sum(torch.exp(-output), 2)     # (N, B)
        output = output.view(M.size(0), -1)

        # Append the minibatch statistics to the original features.
        output = torch.cat((inp, output), 1)
        return output
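
# Usage sketch (illustrative): the layer widens each sample's flattened
# feature vector by B_dim minibatch statistics, so any downstream layer must
# expect num_channels * 4 * 4 + B_dim inputs. Sizes here are assumptions.
#
#   mbd = minibatch_discriminator(num_channels=512, B_dim=128, C_dim=16)
#   feats = torch.randn(8, 512, 4, 4)
#   out = mbd(feats)   # -> (8, 512 * 4 * 4 + 128)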

class Utils(object):

    @staticmethod
    def smooth_label(tensor, offset):
        # One-sided label smoothing, e.g. real labels 1.0 -> 0.9 with offset=-0.1.
        return tensor + offset

    # based on: https://github.com/caogang/wgan-gp/blob/master/gan_cifar10.py
    @staticmethod
    def compute_GP(netD, real_data, real_embed, fake_data, LAMBDA):
        BATCH_SIZE = real_data.size(0)
        # Sample one interpolation coefficient per example and broadcast it
        # over the (3, 64, 64) image dimensions.
        alpha = torch.rand(BATCH_SIZE, 1)
        alpha = alpha.expand(BATCH_SIZE, int(real_data.nelement() / BATCH_SIZE)).contiguous().view(BATCH_SIZE, 3, 64, 64)
        alpha = alpha.cuda()

        interpolates = alpha * real_data + ((1 - alpha) * fake_data)
        interpolates = interpolates.cuda()
        # Modern equivalent of the deprecated autograd.Variable(..., requires_grad=True):
        # detach from any history and mark as a leaf that requires gradients.
        interpolates = interpolates.detach().requires_grad_(True)

        disc_interpolates, _ = netD(interpolates, real_embed)
        gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                                  grad_outputs=torch.ones(disc_interpolates.size()).cuda(),
                                  create_graph=True, retain_graph=True, only_inputs=True)[0]
        # Flatten per-sample gradients before taking the norm; the reference
        # implementation does this, and without it the norm is taken over the
        # channel dimension only rather than per example.
        gradients = gradients.view(gradients.size(0), -1)
        gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
        return gradient_penalty
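
    # Usage sketch (illustrative; the WGAN-GP critic loss shown is the
    # standard formulation, not code taken from this repo). netD is assumed
    # to return (score, features) and to accept the matching text embedding,
    # as compute_GP itself expects above:
    #
    #   gp = Utils.compute_GP(netD, real_images, real_embed, fake_images, LAMBDA=10)
    #   d_loss = fake_score.mean() - real_score.mean() + gp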
    @staticmethod
    def save_checkpoint(netD, netG, dir_path, subdir_path, epoch):
        # Save discriminator and generator weights under dir_path/subdir_path.
        path = os.path.join(dir_path, subdir_path)
        if not os.path.exists(path):
            os.makedirs(path)

        torch.save(netD.state_dict(), '{0}/disc_{1}.pth'.format(path, epoch))
        torch.save(netG.state_dict(), '{0}/gen_{1}.pth'.format(path, epoch))

    @staticmethod
    def weights_init(m):
        # DCGAN-style initialization: conv weights ~ N(0, 0.02), batch-norm
        # weights ~ N(1, 0.02) with zero bias.
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)
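
# Usage sketch (illustrative): weights_init is written to be applied
# recursively via nn.Module.apply, and smooth_label implements one-sided
# label smoothing for the real labels; the -0.1 offset is an assumption.
#
#   netG.apply(Utils.weights_init)
#   netD.apply(Utils.weights_init)
#   real_labels = Utils.smooth_label(torch.ones(batch_size), -0.1)  # -> 0.9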