forked from clementfarabet/lua---nnx
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Type.lua
55 lines (47 loc) · 1.69 KB
/
Type.lua
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
local Type, parent = torch.class('nn.Type', 'nn.Sequential')

--- Container that runs its child modules in a fixed tensor type while
-- presenting the default tensor type at its boundaries: inputs and
-- gradOutputs are copied to the internal type before the children run,
-- and outputs/gradInputs are copied back to the default type.
-- @param tensortype tensor type name, either short ('Float') or fully
--   qualified ('torch.FloatTensor').
function Type:__init(tensortype)
   parent.__init(self)
   -- Fail early with a clear message instead of "attempt to index nil".
   assert(tensortype ~= nil,
      'nn.Type: expected a tensor type string, got nil')
   -- Expand a short name like 'Cuda' into 'torch.CudaTensor'.
   if not tensortype:find('torch%..+Tensor') then
      tensortype = 'torch.' .. tensortype .. 'Tensor'
   end
   -- NOTE(review): this field shadows the inherited nn.Module:type()
   -- method on instances; kept as-is for backward compatibility since
   -- Type:add and the converters below read self.type.
   self.type = tensortype
   self.defaulttype = torch.getdefaulttensortype()
   -- Boundary converters: default type <-> internal type.
   self.convert_input = nn.Copy(self.defaulttype, self.type)
   self.convert_gradOutput = nn.Copy(self.defaulttype, self.type)
   self.convert_output = nn.Copy(self.type, self.defaulttype)
   self.convert_gradInput = nn.Copy(self.type, self.defaulttype)
end
--- Append a child module, first casting it to this container's
-- internal tensor type. Returns self so calls can be chained.
function Type:add(module)
   local cast = module:type(self.type)
   parent.add(self, cast)
   return self
end
--- Forward pass: convert the input to the internal tensor type, run
-- the wrapped modules, then convert the result back to the default
-- tensor type. The converted result is stored in self.output.
function Type:forward(input)
   local cast_input = self.convert_input:forward(input)
   local raw_output = parent.forward(self, cast_input)
   self.output = self.convert_output:forward(raw_output)
   return self.output
end
--- Backward pass: both input and gradOutput are converted to the
-- internal tensor type before the parent's backward runs; the
-- resulting gradInput is converted back to the default tensor type
-- and stored in self.gradInput.
function Type:backward(input, gradOutput)
   local cast_input = self.convert_input:forward(input)
   local cast_grad = self.convert_gradOutput:forward(gradOutput)
   local raw_gradInput = parent.backward(self, cast_input, cast_grad)
   self.gradInput = self.convert_gradInput:forward(raw_gradInput)
   return self.gradInput
end
--- Accumulate parameter gradients, converting both arguments to the
-- internal tensor type first so the children see consistent types.
function Type:accGradParameters(input, gradOutput)
   local cast_input = self.convert_input:forward(input)
   local cast_grad = self.convert_gradOutput:forward(gradOutput)
   parent.accGradParameters(self, cast_input, cast_grad)
end
-- Convenience subclass: an nn.Type fixed to torch.FloatTensor.
local Float, parent = torch.class('nn.Float', 'nn.Type')
function Float:__init()
parent.__init(self, 'torch.FloatTensor')
end
-- Convenience subclass: an nn.Type fixed to torch.DoubleTensor.
local Double, parent = torch.class('nn.Double', 'nn.Type')
function Double:__init()
parent.__init(self, 'torch.DoubleTensor')
end
-- Convenience subclass: an nn.Type fixed to torch.CudaTensor.
-- NOTE(review): presumably requires cutorch to be loaded so that
-- torch.CudaTensor exists — confirm against callers.
local Cuda, parent = torch.class('nn.Cuda', 'nn.Type')
function Cuda:__init()
parent.__init(self, 'torch.CudaTensor')
end