# coding: utf-8
# TensorFlow 1.x graph-mode ops (relies on tf.variable_scope and tf.contrib).
# In[1]:
import tensorflow as tf
import numpy as np

# Fix the seeds so runs are reproducible.
seed = 42
np.random.seed(seed)
tf.set_random_seed(seed)
# In[3]:
def conv2d(input_, output_dim, k_h=3, k_w=3, d_h=2, d_w=2, stddev=2.0, name="conv2d"):
    """Args:
        input_: a feature map [batch_size, height, width, input_dim]
        output_dim: number of output feature-map channels
        k_h, k_w: kernel size [k_h, k_w, input_dim, output_dim]
        d_h, d_w: stride [1, d_h, d_w, 1]
        stddev: variance-scaling factor for the weight initializer
        name: variable scope
    Return:
        output feature map
    """
    with tf.variable_scope(name):
        prev_units = input_.get_shape()[-1]
        # Scale the initializer stddev by the fan of the layer.
        stddev = np.sqrt(stddev / (np.sqrt(int(prev_units) * int(output_dim)) * k_h * k_w))
        w = tf.get_variable('w', [k_h, k_w, prev_units, output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
        b = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
        conv = tf.nn.bias_add(conv, b)
        return conv
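# Usage sketch (illustrative shapes; assumes a TF1 graph is being built):
#     x = tf.placeholder(tf.float32, [16, 32, 32, 64])
#     h = conv2d(x, output_dim=128, name="conv1")  # -> [16, 16, 16, 128] with stride 2, SAME padding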
# In[4]:
def deconv2d(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2, stddev=1.0, name="deconv2d", input_dim=None):
    """Args:
        input_: a feature map [batch_size, height, width, input_dim]
        output_shape: output feature map shape [batch_size, height, width, output_dim]
        k_h, k_w: kernel size [k_h, k_w, output_dim, input_dim]
        d_h, d_w: stride [1, d_h, d_w, 1]
        stddev: variance-scaling factor for the weight initializer
        name: variable scope
        input_dim: optional override for the inferred input channel count
    Return:
        output feature map
    """
    with tf.variable_scope(name):
        stddev = np.sqrt(stddev / (np.sqrt(int(input_.get_shape()[-1]) * int(output_shape[-1])) * k_h * k_w))
        if not input_dim:
            w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
                                initializer=tf.random_normal_initializer(stddev=stddev))
        else:
            w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_dim],
                                initializer=tf.random_normal_initializer(stddev=stddev))
        deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1])
        bias = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
        deconv = tf.nn.bias_add(deconv, bias)
        return deconv
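# Usage sketch (illustrative shapes): stride 2 doubles the spatial resolution.
#     z = tf.placeholder(tf.float32, [16, 8, 8, 256])
#     g = deconv2d(z, output_shape=[16, 16, 16, 128], name="deconv1")  # -> [16, 16, 16, 128]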
def upscale(X):
    """
    Upscales X by 2x through nearest-neighbor interpolation.
    :param X: [batch_size, h, w, c]
    :return: [batch_size, 2h, 2w, c]
    """
    prev_shape = X.get_shape()
    size = [2 * int(s) for s in prev_shape[1:3]]
    out = tf.image.resize_nearest_neighbor(X, size)
    return out
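# Usage sketch:
#     x = tf.placeholder(tf.float32, [16, 32, 32, 3])
#     up = upscale(x)  # -> [16, 64, 64, 3]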
def residual_block(X, num_units, k_h=3, k_w=3, stddev_factor=0.001, name="res"):
    """
    Pre-activation residual block: BN -> ReLU -> conv -> BN -> ReLU (via conv2d) -> conv, plus skip connection.
    :param X: the input tensor of size [batch_size, h, w, c]
    :param num_units: the output channel count (currently unused; the block keeps the input channel count)
    :param k_h: kernel height
    :param k_w: kernel width
    :param stddev_factor: variance-scaling factor for the initial weights
    :return: the output tensor of the residual block
    """
    output_dim = X.get_shape()[3]
    with tf.variable_scope(name):
        X0 = relu(batch_norm(X, name="bn0"))
        h1 = relu(batch_norm(conv2d(X0, output_dim=output_dim, k_h=k_h, k_w=k_w, d_h=1, d_w=1, stddev=stddev_factor, name="h1_conv"), name="bn1"))
        h2 = conv2d(h1, output_dim=output_dim, k_h=k_h, k_w=k_w, d_h=1, d_w=1, stddev=stddev_factor, name="h2_conv")
        return tf.add(X, h2)
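# Usage sketch: the skip connection requires the output to match X's shape,
# so the stride is 1 and the channel count is taken from X.
#     x = tf.placeholder(tf.float32, [16, 32, 32, 64])
#     h = residual_block(x, num_units=64, name="res1")  # -> [16, 32, 32, 64]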
def conv_cond_concat(x, y):
    """Concatenate a conditioning tensor onto the feature-map axis, tiling y across the spatial dims."""
    x_shapes = x.get_shape()
    y_shapes = y.get_shape()
    return tf.concat([x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], axis=3)
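# Usage sketch (e.g. conditioning a GAN feature map on a one-hot label):
#     x = tf.placeholder(tf.float32, [16, 32, 32, 64])
#     y = tf.placeholder(tf.float32, [16, 1, 1, 10])
#     xy = conv_cond_concat(x, y)  # -> [16, 32, 32, 74]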
def mean(X):
    """
    :param X: a tensor of rank > 2, for example [batch_size, h, w, c]
    :return: the spatial average of X, shape [batch_size, c]
    """
    prev_shape = X.get_shape()
    reduction_indices = list(range(len(prev_shape)))
    # Average over every axis except the first (batch) and last (channels).
    reduction_indices = reduction_indices[1:-1]
    out = tf.reduce_mean(X, reduction_indices=reduction_indices)
    return out
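# Usage sketch (global average pooling):
#     x = tf.placeholder(tf.float32, [16, 32, 32, 64])
#     pooled = mean(x)  # -> [16, 64]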
def linear(input_, output_size, scope=None, stddev=1.0, bias_start=0.0):
    """Args:
        input_: input tensor of shape [batch_size, input_size]
        output_size: output tensor dim
        stddev: stddev of the weight initializer
        bias_start: initial value of the bias
    Return:
        output tensor of shape [batch_size, output_size]
    """
    shape = input_.get_shape().as_list()
    with tf.variable_scope(scope or 'Linear'):
        matrix = tf.get_variable("Matrix", [shape[1], output_size],
                                 initializer=tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable("bias", [output_size], initializer=tf.constant_initializer(bias_start))
        return tf.matmul(input_, matrix) + bias
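# Usage sketch:
#     z = tf.placeholder(tf.float32, [16, 100])
#     h = linear(z, 256, scope="fc1")  # -> [16, 256]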
def lrelu(x, leak=0.2, name='lrelu'):
    return tf.maximum(x, leak * x, name=name)

def relu(x):
    return tf.nn.relu(x)

def sigmoid(x):
    return tf.nn.sigmoid(x)

def batch_norm(x, train=True, epsilon=1e-5, momentum=0.9, name="batch_norm"):
    return tf.contrib.layers.batch_norm(x, decay=momentum, updates_collections=None, epsilon=epsilon, scale=True,
                                        is_training=train, scope=name)

def flatten(x):
    return tf.contrib.layers.flatten(x)
def gaussian_noise_layer(input_layer, std=0.5, istrain=True):
    """Adds zero-mean Gaussian noise during training; identity at inference time."""
    if istrain:
        noise = tf.random_normal(shape=tf.shape(input_layer), mean=0.0, stddev=std, dtype=tf.float32)
        return input_layer + noise
    else:
        return input_layer
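# Usage sketch:
#     x = tf.placeholder(tf.float32, [16, 32, 32, 3])
#     noisy = gaussian_noise_layer(x, std=0.1, istrain=True)  # same shape as x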
def downscale(X, k):
    """Downscale an image by a factor of k via average pooling (assumes 3 input channels)."""
    # Build a constant averaging kernel: each output channel averages a
    # k x k window of the corresponding input channel.
    arr = np.zeros((k, k, 3, 3))
    arr[:, :, 0, 0] = 1.0 / (k * k)
    arr[:, :, 1, 1] = 1.0 / (k * k)
    arr[:, :, 2, 2] = 1.0 / (k * k)
    downscale_weight = tf.constant(arr, dtype=tf.float32)
    downscaled = tf.nn.conv2d(X, downscale_weight, strides=[1, k, k, 1], padding='SAME')
    return downscaled
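# Usage sketch: equivalent to k x k average pooling with stride k on an RGB image.
#     x = tf.placeholder(tf.float32, [16, 64, 64, 3])
#     small = downscale(x, 2)  # -> [16, 32, 32, 3]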