@@ -39,7 +39,7 @@ def wrapper(self):
 
 
 def length(data):
-    # with tf.variable_scope("rnn_length"): #FIXME no scope?
+    # with tf.variable_scope("rnn_length"):
     used = tf.sign(tf.reduce_max(input_tensor=tf.abs(data), axis=2))
     length = tf.reduce_sum(input_tensor=used, axis=1)
     length = tf.cast(length, tf.int32)
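For orientation: `length()` infers per-example sequence lengths from zero-padding. A minimal sketch of its behaviour, run in TF2 eager mode for brevity (the repo itself builds a `tf.compat.v1` graph):

```python
import numpy as np
import tensorflow as tf

# Batch of two sequences, zero-padded to max_length 4, with 2 features per step.
data = tf.constant(np.array([
    [[0.5, -1.0], [0.2, 0.1], [0.0, 0.0], [0.0, 0.0]],   # 2 real steps
    [[1.0,  0.0], [0.3, 0.4], [0.7, 0.0], [0.0, 0.0]],   # 3 real steps
], dtype=np.float32))

# Same ops as length(): mark each step that has any non-zero feature with 1.0,
# then count the marked steps per example.
used = tf.sign(tf.reduce_max(input_tensor=tf.abs(data), axis=2))
lengths = tf.cast(tf.reduce_sum(input_tensor=used, axis=1), tf.int32)
print(lengths.numpy())  # -> [2 3]
```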
@@ -52,7 +52,7 @@ def fusedBN(input, scale, offset, mean, variance, training):
 
 
 def last_relevant(output, length):
-    # with tf.variable_scope("rnn_last"): #FIXME no scope?
+    # with tf.variable_scope("rnn_last"):
     batch_size = tf.shape(input=output)[0]
     max_length = int(output.get_shape()[1])
     output_size = int(output.get_shape()[2])
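The hunk cuts off before the rest of `last_relevant`. For context, these three lines set up the widely used flatten-and-gather recipe for picking the last valid RNN output per example; the sketch below (`last_relevant_sketch` is a hypothetical stand-in, not necessarily this file's exact continuation) shows the complete pattern:

```python
import tensorflow as tf

def last_relevant_sketch(output, length):
    # output: [batch, max_length, output_size]; length: [batch] int32
    batch_size = tf.shape(input=output)[0]
    max_length = int(output.get_shape()[1])
    output_size = int(output.get_shape()[2])
    # Flatten to [batch * max_length, output_size], then gather row
    # b * max_length + (length[b] - 1) for each example b, i.e. the
    # output at the last non-padding step.
    index = tf.range(0, batch_size) * max_length + (length - 1)
    flat = tf.reshape(output, [-1, output_size])
    return tf.gather(flat, index)
```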
@@ -64,19 +64,19 @@ def last_relevant(output, length):
 
 def conv2d(input, filters, seq):
     conv = tf.compat.v1.layers.conv2d(
-        # name="conv_lv{}".format(seq), #FIXME perhaps no name?
+        # name="conv_lv{}".format(seq),
         inputs=input,
         filters=filters,
         kernel_size=2,
         kernel_initializer=tf.compat.v1.truncated_normal_initializer(
             stddev=0.01),
         bias_initializer=tf.compat.v1.constant_initializer(0.1),
         padding="same",
-        activation=tf.nn.elu)  # FIXME or perhaps relu6??
+        activation=tf.nn.elu)
     h_stride = 2 if int(conv.get_shape()[1]) >= 2 else 1
     w_stride = 2 if int(conv.get_shape()[2]) >= 2 else 1
     pool = tf.compat.v1.layers.max_pooling2d(
-        # name="pool_lv{}".format(seq), #FIXME perhaps no name?
+        # name="pool_lv{}".format(seq),
         inputs=conv, pool_size=2, strides=[h_stride, w_stride],
         padding="same")
     # can't use tf.nn.batch_normalization in a mapped function
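Each `conv2d()` call pools with stride 2 along any spatial axis that is still at least 2 wide, so the feature map roughly halves per level until an axis collapses to size 1. A self-contained illustration of just the stride selection (`strides_for` is a hypothetical helper mirroring the two ternary expressions above):

```python
def strides_for(height, width):
    # Halve an axis only while it can still shrink, exactly as in the hunk.
    h_stride = 2 if height >= 2 else 1
    w_stride = 2 if width >= 2 else 1
    return h_stride, w_stride

# e.g. a [batch, 16, 8, 1] input passed through successive levels:
#   level 1: strides (2, 2) -> [batch, 8, 4, f1]
#   level 2: strides (2, 2) -> [batch, 4, 2, f2]
#   level 3: strides (2, 2) -> [batch, 2, 1, f3]
#   level 4: strides (2, 1) -> [batch, 1, 1, f4]  # width is already 1
print(strides_for(2, 1))  # -> (2, 1)
```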
@@ -119,33 +119,33 @@ def prediction(self):
         rnn = self.rnn(self, cnn)
         dense = tf.compat.v1.layers.dense(
             inputs=rnn,
-            units=self._num_hidden * 3,  # FIXME fallback to 3 * hidden size?
+            units=self._num_hidden * 3,
             kernel_initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
             bias_initializer=tf.compat.v1.constant_initializer(0.1),
-            activation=tf.nn.elu)  # FIXME sure elu?
+            activation=tf.nn.elu)
         dropout = tf.compat.v1.layers.dropout(
             inputs=dense, rate=0.5, training=self.training)
         output = tf.compat.v1.layers.dense(
             inputs=dropout,
             units=int(self.target.get_shape()[1]),
             kernel_initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
             bias_initializer=tf.compat.v1.constant_initializer(0.1),
-            activation=tf.nn.relu6)  # FIXME fall back to relu6?
+            activation=tf.nn.relu6)
         return output
 
     @staticmethod
     def rnn(self, input):
         # Recurrent network.
         cells = []
-        state_size = self._num_hidden  # FIXME fallback to 128
+        state_size = self._num_hidden
         for _ in range(self._num_layers):
             # Or LSTMCell(num_units), or use ConvLSTMCell?
             cell = tf.compat.v1.nn.rnn_cell.GRUCell(
                 state_size,
                 kernel_initializer=tf.compat.v1.truncated_normal_initializer(
                     stddev=0.01),
                 bias_initializer=tf.compat.v1.constant_initializer(0.1))
-                # activation=None)  # FIXME fall back to None?
+                # activation=None)
             cells.append(cell)
         cell = tf.compat.v1.nn.rnn_cell.MultiRNNCell(cells)
         _length = length(input)
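The hunk ends right after `_length = length(input)`. In this pattern the stacked cell is normally unrolled with `dynamic_rnn`, using the computed lengths so padding steps are skipped, and the last valid output is then extracted. A sketch of the assumed continuation (continuing the names in scope above; not the file's verbatim code):

```python
# Hypothetical continuation of rnn(), in TF1-style graph mode:
output, _ = tf.compat.v1.nn.dynamic_rnn(
    cell,                      # the MultiRNNCell built above
    input,
    dtype=tf.float32,
    sequence_length=_length)   # stop stepping each example at its real length
last = last_relevant(output, _length)  # [batch, state_size] at each sequence's end
return last
```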
@@ -167,7 +167,7 @@ def cnn2d(input, training, height):
     accepts input shape: [step_size, time_shift*features]
     transformed to: [step_size, time_shift(height), features(width), channel]
     """
-    # with tf.variable_scope("conv2d_parent"): #FIXME no scope?
+    # with tf.variable_scope("conv2d_parent"):
     print("shape of cnn input: {}".format(input.get_shape()))
     width = int(input.get_shape()[1]) // height
     input2d = tf.reshape(input, [-1, height, width, 1])
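The reshape turns each flat `[time_shift*features]` row into a one-channel "image" whose height is the time axis and whose width is the feature axis, as the docstring says. A small numeric check, assuming for illustration time_shift = 30 and 12 features per step:

```python
import tensorflow as tf

height = 30                                  # time_shift
flat = tf.zeros([5, 30 * 12])                # [step_size, time_shift*features]
width = int(flat.get_shape()[1]) // height   # -> 12 features per step
input2d = tf.reshape(flat, [-1, height, width, 1])
print(input2d.shape)                         # -> (5, 30, 12, 1)
```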
@@ -191,17 +191,17 @@ def cnn2d(input, training, height):
     convlayer = tf.squeeze(convlayer, [1, 2])
     print("squeeze: {}".format(convlayer.get_shape()))
     dense = tf.compat.v1.layers.dense(
-        # name="cnn2d_dense", #FIXME no name?
+        # name="cnn2d_dense",
         inputs=convlayer,
         units=convlayer.get_shape()[1] * 2,
         kernel_initializer=tf.compat.v1.truncated_normal_initializer(
             stddev=0.01),
         bias_initializer=tf.compat.v1.constant_initializer(0.1),
-        activation=tf.nn.elu  # FIXME or perhaps elu?
+        activation=tf.nn.elu
     )
     print("dense: {}".format(dense.get_shape()))
     dropout = tf.compat.v1.layers.dropout(
-        # name="cnn2d_dropout", #FIXME no name?
+        # name="cnn2d_dropout",
         inputs=dense, rate=0.5, training=training)
     return dropout
 