utils.py
import torch
from torch import nn
import torch.nn.functional as F


class Conv1dSamePadding(nn.Conv1d):
    """Represents the "same" padding functionality from TensorFlow.
    See: https://github.com/pytorch/pytorch/issues/3867
    Note that the padding argument in the initializer doesn't do anything,
    since the padding is computed dynamically in the forward pass.
    """
    def forward(self, input):
        return conv1d_same_padding(input, self.weight, self.bias, self.stride,
                                   self.dilation, self.groups)
def conv1d_same_padding(input, weight, bias, stride, dilation, groups):
    # stride and dilation are expected to be tuples.
    kernel, dilation, stride = weight.size(2), dilation[0], stride[0]
    l_out = l_in = input.size(2)
    # Total padding required so that the output length equals the input
    # length for the given stride, dilation, and kernel size.
    padding = (((l_out - 1) * stride) - l_in + (dilation * (kernel - 1)) + 1)
    if padding % 2 != 0:
        # Odd total padding: pad one extra element on the right, matching
        # TensorFlow's asymmetric "SAME" padding.
        input = F.pad(input, [0, 1])

    return F.conv1d(input=input, weight=weight, bias=bias, stride=stride,
                    padding=padding // 2,
                    dilation=dilation, groups=groups)
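As a quick sanity check (a minimal sketch, not part of the original file; the layer sizes and shapes are made-up assumptions), the computed padding keeps the output length equal to the input length. For kernel_size=5, dilation=1, stride=1 and an input length of 100, padding = (100 - 1)*1 - 100 + 1*(5 - 1) + 1 = 4, i.e. two elements on each side:

# Hypothetical shape check using the class defined above; the sizes here
# are illustrative assumptions, not taken from the repository.
x = torch.randn(2, 4, 100)  # (batch, channels, length)
conv = Conv1dSamePadding(in_channels=4, out_channels=8, kernel_size=5, stride=1)
assert conv(x).shape == (2, 8, 100)  # the sequence length is preserved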
class ConvBlock(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int,
                 stride: int) -> None:
        super().__init__()
        self.layers = nn.Sequential(
            Conv1dSamePadding(in_channels=in_channels,
                              out_channels=out_channels,
                              kernel_size=kernel_size,
                              stride=stride),
            nn.BatchNorm1d(num_features=out_channels),
            nn.ReLU(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:  # type: ignore
        return self.layers(x)
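And a minimal usage sketch for ConvBlock (again, the shapes are illustrative assumptions, not from the repository): the block changes the channel count while the "same" padding keeps the number of timesteps fixed.

# Hypothetical example, not part of utils.py: run a batch of univariate
# series through a ConvBlock and check the output shape.
block = ConvBlock(in_channels=1, out_channels=16, kernel_size=7, stride=1)
series = torch.randn(32, 1, 128)  # (batch, channels, timesteps)
print(block(series).shape)  # torch.Size([32, 16, 128])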