-- test.lua (forked from Element-Research/rnn)
local rnntest = {}
local precision = 1e-5
local mytester
local benchmark = false
local makeOldRecurrent_isdone = false
local function makeOldRecurrent()
if makeOldRecurrent_isdone then
return
end
makeOldRecurrent_isdone = true
-- I am making major modifications to nn.Recurrent.
-- So I want to make sure the new version matches the old
local AbstractRecurrent, parent = torch.class('nn.ARTest', 'nn.Container')
function AbstractRecurrent:__init(rho)
parent.__init(self)
self.rho = rho --the maximum number of time steps to BPTT
self.fastBackward = true
self.copyInputs = true
self.copyGradOutputs = true
self.inputs = {}
self.outputs = {}
self._gradOutputs = {}
self.gradOutputs = {}
self.scales = {}
self.gradParametersAccumulated = false
self.onlineBackward = false
self.step = 1
-- stores internal states of Modules at different time-steps
self.sharedClones = {}
self:reset()
end
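-- Each time-step gets its own clone of recurrentModule: the clones share
-- parameters (via stepClone) but keep separate activation buffers, which is
-- what lets BPTT below revisit the intermediate states of earlier steps.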
function AbstractRecurrent:getStepModule(step)
assert(step, "expecting step at arg 1")
local recurrentModule = self.sharedClones[step]
if not recurrentModule then
recurrentModule = self.recurrentModule:stepClone()
self.sharedClones[step] = recurrentModule
end
return recurrentModule
end
function AbstractRecurrent:maskZero(nInputDim)
self.recurrentModule = nn.MaskZero(self.recurrentModule, nInputDim)
return self
end
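-- maskZero wraps the recurrentModule in nn.MaskZero so that time-steps whose
-- input rows are entirely zero are masked (zero output and zero gradient),
-- presumably to support zero-padded variable-length sequences.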
function AbstractRecurrent:updateGradInput(input, gradOutput)
if self.onlineBackward then
-- updateGradInput will be called in reverse order of time
self.updateGradInputStep = self.updateGradInputStep or self.step
if self.copyGradOutputs then
self.gradOutputs[self.updateGradInputStep-1] = nn.rnn.recursiveCopy(self.gradOutputs[self.updateGradInputStep-1], gradOutput)
else
self.gradOutputs[self.updateGradInputStep-1] = self.gradOutputs[self.updateGradInputStep-1] or nn.rnn.recursiveNew(gradOutput)
nn.rnn.recursiveSet(self.gradOutputs[self.updateGradInputStep-1], gradOutput)
end
-- BPTT for one time-step (rho = 1)
self.gradInput = self:updateGradInputThroughTime(self.updateGradInputStep, 1)
self.updateGradInputStep = self.updateGradInputStep - 1
assert(self.gradInput, "Missing gradInput")
return self.gradInput
else
-- Back-Propagate Through Time (BPTT) happens in updateParameters()
-- for now we just keep a list of the gradOutputs
if self.copyGradOutputs then
self.gradOutputs[self.step-1] = nn.rnn.recursiveCopy(self.gradOutputs[self.step-1], gradOutput)
else
self.gradOutputs[self.step-1] = self.gradOutputs[self.step-1] or nn.rnn.recursiveNew(gradOutput)
nn.rnn.recursiveSet(self.gradOutputs[self.step-1], gradOutput)
end
end
end
function AbstractRecurrent:accGradParameters(input, gradOutput, scale)
if self.onlineBackward then
-- accGradParameters will be called in reverse order of time
assert(self.updateGradInputStep < self.step, "Missing updateGradInput")
self.accGradParametersStep = self.accGradParametersStep or self.step
self.scales[self.accGradParametersStep-1] = scale or 1
-- BPTT for one time-step (rho = 1)
self:accGradParametersThroughTime(self.accGradParametersStep, 1)
self.accGradParametersStep = self.accGradParametersStep - 1
else
-- Back-Propagate Through Time (BPTT) happens in updateParameters()
-- for now we just keep a list of the scales
self.scales[self.step-1] = scale or 1
end
end
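-- In online mode (see backwardOnline below) each updateGradInput/accGradParameters
-- call performs one step of BPTT in reverse time order; otherwise the gradOutputs
-- and scales are only cached above and the full BPTT is deferred to
-- updateParameters() / backwardThroughTime().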
function AbstractRecurrent:backwardUpdateThroughTime(learningRate)
local gradInput = self:updateGradInputThroughTime()
self:accUpdateGradParametersThroughTime(learningRate)
return gradInput
end
-- this is only useful when calling updateParameters directly on the rnn
-- Note that a call to updateParameters on an rnn container DOES NOT call this method
function AbstractRecurrent:updateParameters(learningRate)
if self.gradParametersAccumulated then
for i=1,#self.modules do
self.modules[i]:updateParameters(learningRate)
end
else
self:backwardUpdateThroughTime(learningRate)
end
end
-- goes hand in hand with the next method: forget()
-- this method brings the oldest memory to the current step
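-- Worked example (assuming self.rho = 5 and offset = 0): rho+1 = 6 slots are
-- kept, so once self.step reaches 7 the clone stored for step 1 is moved to
-- slot 7 and reused instead of allocating a new clone.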
function AbstractRecurrent:recycle(offset)
-- offset can be used to skip initialModule (if any)
offset = offset or 0
-- pad rho with one extra time-step of memory (helps for Sequencer:remember()).
-- also, rho could have been manually increased or decreased
local rho = math.max(self.rho+1, _.size(self.sharedClones) or 0)
if self.step > rho + offset then
assert(self.sharedClones[self.step] == nil)
self.sharedClones[self.step] = self.sharedClones[self.step-rho]
self.sharedClones[self.step-rho] = nil
end
rho = math.max(self.rho+1, _.size(self.outputs) or 0)
if self.step > rho + offset then
-- need to keep rho+1 of these
assert(self.outputs[self.step] == nil)
self.outputs[self.step] = self.outputs[self.step-rho-1]
self.outputs[self.step-rho-1] = nil
end
rho = math.max(self.rho+1, _.size(self.inputs) or 0)
if self.step > rho then
assert(self.inputs[self.step] == nil)
assert(self.gradOutputs[self.step] == nil)
assert(self._gradOutputs[self.step] == nil)
self.inputs[self.step] = self.inputs[self.step-rho]
self.inputs[self.step-rho] = nil
self.gradOutputs[self.step] = self.gradOutputs[self.step-rho]
self._gradOutputs[self.step] = self._gradOutputs[self.step-rho]
self.gradOutputs[self.step-rho] = nil
self._gradOutputs[self.step-rho] = nil
self.scales[self.step-rho] = nil
end
return self
end
-- this method brings all the memory back to the start
function AbstractRecurrent:forget(offset)
offset = offset or 0
-- bring all states back to the start of the sequence buffers
if self.train ~= false then
self.outputs = _.compact(self.outputs)
self.sharedClones = _.compact(self.sharedClones)
self.inputs = _.compact(self.inputs)
self.scales = {}
self.gradOutputs = _.compact(self.gradOutputs)
self._gradOutputs = _.compact(self._gradOutputs)
end
-- forget the past inputs; restart from first step
self.step = 1
return self
end
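-- The helpers below temporarily splice the sharedClones (and, in the Recurrent
-- subclass, the initialModule) into self.modules so that Container-wide
-- operations such as type()/training()/evaluate()/reinforce() also reach the
-- per-step clones.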
function AbstractRecurrent:includingSharedClones(f)
local modules = self.modules
local sharedClones = self.sharedClones
self.sharedClones = nil
self.modules = {}
for i,modules in ipairs{modules, sharedClones} do
for j, module in pairs(modules) do
table.insert(self.modules, module)
end
end
local r = f()
self.modules = modules
self.sharedClones = sharedClones
return r
end
function AbstractRecurrent:type(type)
return self:includingSharedClones(function()
return parent.type(self, type)
end)
end
function AbstractRecurrent:training()
return self:includingSharedClones(function()
return parent.training(self)
end)
end
function AbstractRecurrent:evaluate()
return self:includingSharedClones(function()
return parent.evaluate(self)
end)
end
function AbstractRecurrent:reinforce(reward)
return self:includingSharedClones(function()
return parent.reinforce(self, reward)
end)
end
function AbstractRecurrent:sharedClone(shareParams, shareGradParams, clones, pointers, stepClone)
if stepClone then
return self
else
return parent.sharedClone(self, shareParams, shareGradParams, clones, pointers, stepClone)
end
end
function AbstractRecurrent:backwardOnline(online)
self.onlineBackward = (online == nil) and true or online
end
function AbstractRecurrent:maxBPTTstep(rho)
self.rho = rho
end
-- backwards compatibility
AbstractRecurrent.recursiveResizeAs = rnn.recursiveResizeAs
AbstractRecurrent.recursiveSet = rnn.recursiveSet
AbstractRecurrent.recursiveCopy = rnn.recursiveCopy
AbstractRecurrent.recursiveAdd = rnn.recursiveAdd
AbstractRecurrent.recursiveTensorEq = rnn.recursiveTensorEq
AbstractRecurrent.recursiveNormal = rnn.recursiveNormal
local Recurrent, parent = torch.class('nn.ReTest', 'nn.ARTest')
function Recurrent:__init(start, input, feedback, transfer, rho, merge)
parent.__init(self, rho or 5)
local ts = torch.type(start)
if ts == 'torch.LongStorage' or ts == 'number' then
start = nn.Add(start)
elseif ts == 'table' then
start = nn.Add(torch.LongStorage(start))
elseif not torch.isTypeOf(start, 'nn.Module') then
error"Recurrent : expecting arg 1 of type nn.Module, torch.LongStorage, number or table"
end
self.startModule = start
self.inputModule = input
self.feedbackModule = feedback
self.transferModule = transfer or nn.Sigmoid()
self.mergeModule = merge or nn.CAddTable()
self.modules = {self.startModule, self.inputModule, self.feedbackModule, self.transferModule, self.mergeModule}
self:buildInitialModule()
self:buildRecurrentModule()
self.sharedClones[2] = self.recurrentModule
end
-- build module used for the first step (steps == 1)
function Recurrent:buildInitialModule()
self.initialModule = nn.Sequential()
self.initialModule:add(self.inputModule:sharedClone())
self.initialModule:add(self.startModule)
self.initialModule:add(self.transferModule:sharedClone())
end
-- build module used for the other steps (steps > 1)
function Recurrent:buildRecurrentModule()
local parallelModule = nn.ParallelTable()
parallelModule:add(self.inputModule)
parallelModule:add(self.feedbackModule)
self.recurrentModule = nn.Sequential()
self.recurrentModule:add(parallelModule)
self.recurrentModule:add(self.mergeModule)
self.recurrentModule:add(self.transferModule)
end
function Recurrent:updateOutput(input)
-- output(t) = transfer(feedback(output_(t-1)) + input(input_(t)))
local output
if self.step == 1 then
output = self.initialModule:updateOutput(input)
else
if self.train ~= false then
-- set/save the output states
self:recycle()
local recurrentModule = self:getStepModule(self.step)
-- self.output is the previous output of this module
output = recurrentModule:updateOutput{input, self.output}
else
-- self.output is the previous output of this module
output = self.recurrentModule:updateOutput{input, self.output}
end
end
if self.train ~= false then
local input_ = self.inputs[self.step]
self.inputs[self.step] = self.copyInputs
and nn.rnn.recursiveCopy(input_, input)
or nn.rnn.recursiveSet(input_, input)
end
self.outputs[self.step] = output
self.output = output
self.step = self.step + 1
self.gradPrevOutput = nil
self.updateGradInputStep = nil
self.accGradParametersStep = nil
self.gradParametersAccumulated = false
return self.output
end
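-- BPTT: walk backward from step-1 down to max(timeStep-rho, 2), adding the
-- gradient flowing in from the next step (gradPrevOutput) to each step's
-- external gradOutput, then handle step 1 through the initialModule.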
-- not to be confused with the hit movie Back to the Future
function Recurrent:backwardThroughTime(timeStep, timeRho)
timeStep = timeStep or self.step
local rho = math.min(timeRho or self.rho, timeStep-1)
local stop = timeStep - rho
local gradInput
if self.fastBackward then
self.gradInputs = {}
for step=timeStep-1,math.max(stop, 2),-1 do
local recurrentModule = self:getStepModule(step)
-- backward propagate through this step
local input = self.inputs[step]
local output = self.outputs[step-1]
local gradOutput = self.gradOutputs[step]
if self.gradPrevOutput then
self._gradOutputs[step] = nn.rnn.recursiveCopy(self._gradOutputs[step], self.gradPrevOutput)
nn.rnn.recursiveAdd(self._gradOutputs[step], gradOutput)
gradOutput = self._gradOutputs[step]
end
local scale = self.scales[step]
gradInput, self.gradPrevOutput = unpack(recurrentModule:backward({input, output}, gradOutput, scale))
table.insert(self.gradInputs, 1, gradInput)
end
if stop <= 1 then
-- backward propagate through first step
local input = self.inputs[1]
local gradOutput = self.gradOutputs[1]
if self.gradPrevOutput then
self._gradOutputs[1] = nn.rnn.recursiveCopy(self._gradOutputs[1], self.gradPrevOutput)
nn.rnn.recursiveAdd(self._gradOutputs[1], gradOutput)
gradOutput = self._gradOutputs[1]
end
local scale = self.scales[1]
gradInput = self.initialModule:backward(input, gradOutput, scale)
table.insert(self.gradInputs, 1, gradInput)
end
self.gradParametersAccumulated = true
else
gradInput = self:updateGradInputThroughTime(timeStep, timeRho)
self:accGradParametersThroughTime(timeStep, timeRho)
end
return gradInput
end
function Recurrent:updateGradInputThroughTime(timeStep, rho)
assert(self.step > 1, "expecting at least one updateOutput")
timeStep = timeStep or self.step
self.gradInputs = {}
local gradInput
local rho = math.min(rho or self.rho, timeStep-1)
local stop = timeStep - rho
for step=timeStep-1,math.max(stop,2),-1 do
local recurrentModule = self:getStepModule(step)
-- backward propagate through this step
local input = self.inputs[step]
local output = self.outputs[step-1]
local gradOutput = self.gradOutputs[step]
if self.gradPrevOutput then
self._gradOutputs[step] = nn.rnn.recursiveCopy(self._gradOutputs[step], self.gradPrevOutput)
nn.rnn.recursiveAdd(self._gradOutputs[step], gradOutput)
gradOutput = self._gradOutputs[step]
end
gradInput, self.gradPrevOutput = unpack(recurrentModule:updateGradInput({input, output}, gradOutput))
table.insert(self.gradInputs, 1, gradInput)
end
if stop <= 1 then
-- backward propagate through first step
local input = self.inputs[1]
local gradOutput = self.gradOutputs[1]
if self.gradPrevOutput then
self._gradOutputs[1] = nn.rnn.recursiveCopy(self._gradOutputs[1], self.gradPrevOutput)
nn.rnn.recursiveAdd(self._gradOutputs[1], gradOutput)
gradOutput = self._gradOutputs[1]
end
gradInput = self.initialModule:updateGradInput(input, gradOutput)
table.insert(self.gradInputs, 1, gradInput)
end
return gradInput
end
function Recurrent:accGradParametersThroughTime(timeStep, rho)
timeStep = timeStep or self.step
local rho = math.min(rho or self.rho, timeStep-1)
local stop = timeStep - rho
for step=timeStep-1,math.max(stop,2),-1 do
local recurrentModule = self:getStepModule(step)
-- backward propagate through this step
local input = self.inputs[step]
local output = self.outputs[step-1]
local gradOutput = (step == self.step-1) and self.gradOutputs[step] or self._gradOutputs[step]
local scale = self.scales[step]
recurrentModule:accGradParameters({input, output}, gradOutput, scale)
end
if stop <= 1 then
-- backward propagate through first step
local input = self.inputs[1]
local gradOutput = (1 == self.step-1) and self.gradOutputs[1] or self._gradOutputs[1]
local scale = self.scales[1]
self.initialModule:accGradParameters(input, gradOutput, scale)
end
self.gradParametersAccumulated = true
return gradInput
end
function Recurrent:accUpdateGradParametersThroughInitialModule(lr, rho)
if self.initialModule:size() ~= 3 then
error("only works with Recurrent:buildInitialModule(). "..
"Reimplement this method to work with your subclass."..
"Or use accGradParametersThroughTime instead of accUpdateGrad...")
end
-- backward propagate through first step
local input = self.inputs[1]
local gradOutput = (1 == self.step-1) and self.gradOutputs[1] or self._gradOutputs[1]
local scale = self.scales[1]
local inputModule = self.initialModule:get(1)
local startModule = self.initialModule:get(2)
local transferModule = self.initialModule:get(3)
inputModule:accUpdateGradParameters(input, self.startModule.gradInput, lr*scale)
startModule:accUpdateGradParameters(inputModule.output, transferModule.gradInput, lr*scale)
transferModule:accUpdateGradParameters(startModule.output, gradOutput, lr*scale)
end
function Recurrent:accUpdateGradParametersThroughTime(lr, timeStep, rho)
timeStep = timeStep or self.step
local rho = math.min(rho or self.rho, timeStep-1)
local stop = timeStep - rho
for step=timeStep-1,math.max(stop,2),-1 do
local recurrentModule = self:getStepModule(step)
-- backward propagate through this step
local input = self.inputs[step]
local output = self.outputs[step-1]
local gradOutput = (step == self.step-1) and self.gradOutputs[step] or self._gradOutputs[step]
local scale = self.scales[step]
recurrentModule:accUpdateGradParameters({input, output}, gradOutput, lr*scale)
end
if stop <= 1 then
self:accUpdateGradParametersThroughInitialModule(lr, rho)
end
return gradInput
end
function Recurrent:recycle()
return parent.recycle(self, 1)
end
function Recurrent:forget()
return parent.forget(self, 1)
end
function Recurrent:includingSharedClones(f)
local modules = self.modules
self.modules = {}
local sharedClones = self.sharedClones
self.sharedClones = nil
local initModule = self.initialModule
self.initialModule = nil
for i,modules in ipairs{modules, sharedClones, {initModule}} do
for j, module in pairs(modules) do
table.insert(self.modules, module)
end
end
local r = f()
self.modules = modules
self.sharedClones = sharedClones
self.initialModule = initModule
return r
end
end
function rnntest.Recurrent_old()
-- make sure the new version is still as good as the last version
makeOldRecurrent()
local batchSize = 2
local hiddenSize = 10
local nStep = 3
-- recurrent neural network
local rnn = nn.Recurrent(
hiddenSize,
nn.Linear(hiddenSize, hiddenSize),
nn.Linear(hiddenSize, hiddenSize),
nn.ReLU(), 99999
)
local rnn2 = nn.ReTest(
rnn.startModule:clone(),
rnn.inputModule:clone(),
rnn.feedbackModule:clone(),
nn.ReLU(), 99999
)
local inputs, gradOutputs = {}, {}
local inputs2, gradOutputs2 = {}, {}
for i=1,nStep do
inputs[i] = torch.randn(batchSize, hiddenSize)
gradOutputs[i] = torch.randn(batchSize, hiddenSize)
inputs2[i] = inputs[i]:clone()
gradOutputs2[i] = gradOutputs[i]:clone()
end
local params, gradParams = rnn:getParameters()
local params2, gradParams2 = rnn2:getParameters()
for j=1,3 do
rnn:forget()
rnn2:forget()
rnn:zeroGradParameters()
rnn2:zeroGradParameters()
-- forward
for i=1,nStep do
local output = rnn:forward(inputs[i])
local output2 = rnn2:forward(inputs2[i])
mytester:assertTensorEq(output, output2, 0.000001, "Recurrent_old output err "..i)
rnn2:backward(inputs[i], gradOutputs2[i])
end
-- backward
rnn2:backwardThroughTime()
for i=nStep,1,-1 do
local gradInput = rnn:backward(inputs[i], gradOutputs[i])
mytester:assertTensorEq(gradInput, rnn2.gradInputs[i], 0.000001, "Recurrent_old gradInput err "..i)
end
local p1, gp1 = rnn:parameters()
local p2, gp2 = rnn2:parameters()
for i=1,#gp1 do
mytester:assertTensorEq(gp1[i], gp2[i], 0.00000001, "Recurrent_old gradParams err "..i)
end
mytester:assertTensorEq(gradParams, gradParams2, 0.00000001, "Recurrent_old gradParams error")
rnn2:updateParameters(0.1)
rnn:updateParameters(0.1)
end
if not pcall(function() require 'optim' end) then return end
local hiddenSize = 2
local rnn = nn.Recurrent(hiddenSize, nn.Linear(hiddenSize, hiddenSize), nn.Linear(hiddenSize, hiddenSize))
local criterion = nn.MSECriterion()
local sequence = torch.randn(4,2)
local s = sequence:clone()
local parameters, grads = rnn:getParameters()
function f(x)
parameters:copy(x)
-- Do the forward prop
rnn:zeroGradParameters()
assert(grads:sum() == 0)
local err = 0
local outputs = {}
for i = 1, sequence:size(1) - 1 do
local output = rnn:forward(sequence[i])
outputs[i] = output
err = err + criterion:forward(output, sequence[i + 1])
end
for i=sequence:size(1)-1,1,-1 do
criterion:forward(outputs[i], sequence[i + 1])
local gradOutput = criterion:backward(outputs[i], sequence[i + 1])
rnn:backward(sequence[i], gradOutput)
end
rnn:forget()
return err, grads
end
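-- Central-difference gradient check (overrides optim.checkgrad for this test):
--   dC_est[i] = (C(x + eps*e_i) - C(x - eps*e_i)) / (2*eps)
-- and the returned error is the relative norm ||dC - dC_est|| / ||dC + dC_est||.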
function optim.checkgrad(opfunc, x, eps)
-- compute true gradient:
local _,dC = opfunc(x)
dC:resize(x:size())
-- compute numeric approximations to gradient:
local eps = eps or 1e-7
local dC_est = torch.DoubleTensor(dC:size())
for i = 1,dC:size(1) do
x[i] = x[i] + eps
local C1 = opfunc(x)
x[i] = x[i] - 2 * eps
local C2 = opfunc(x)
x[i] = x[i] + eps
dC_est[i] = (C1 - C2) / (2 * eps)
end
-- estimate error of gradient:
local diff = torch.norm(dC - dC_est) / torch.norm(dC + dC_est)
return diff,dC,dC_est
end
local err = optim.checkgrad(f, parameters:clone())
mytester:assert(err < 0.0001, "Recurrent optim.checkgrad error")
end
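-- rnntest.Recurrent checks nn.Recurrent against a hand-unrolled MLP with
-- shared parameters: forward/backward outputs, behaviour after forget(), a
-- clone run in evaluate() mode, and a clone restricted to rho = nStep-1
-- (which should therefore never reach back to the startModule) should agree.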
function rnntest.Recurrent()
local batchSize = 4
local dictSize = 100
local hiddenSize = 12
local outputSize = 7
local nStep = 5
local inputModule = nn.LookupTable(dictSize, outputSize)
local transferModule = nn.Sigmoid()
-- test MLP feedback Module (because of Module:representations())
local feedbackModule = nn.Sequential()
feedbackModule:add(nn.Linear(outputSize, hiddenSize))
feedbackModule:add(nn.Sigmoid())
feedbackModule:add(nn.Linear(hiddenSize, outputSize))
-- rho = nStep
local mlp = nn.Recurrent(outputSize, inputModule, feedbackModule, transferModule:clone(), nStep)
local gradOutputs, outputs = {}, {}
-- inputs = {input_N, {input_(N-1), {input_(N-2), ...}}}
local inputs
local startModule = mlp.startModule:clone()
inputModule = mlp.inputModule:clone()
feedbackModule = mlp.feedbackModule:clone()
local mlp6 = mlp:clone()
mlp6:evaluate()
mlp:zeroGradParameters()
local mlp7 = mlp:clone()
mlp7.rho = nStep - 1
local inputSequence, gradOutputSequence = {}, {}
for step=1,nStep do
local input = torch.IntTensor(batchSize):random(1,dictSize)
inputSequence[step] = input
local gradOutput
-- for the sake of keeping this unit test simple,
-- only the last step will get a gradient from the output
if step ~= nStep then
gradOutput = torch.zeros(batchSize, outputSize)
else
gradOutput = torch.randn(batchSize, outputSize)
end
gradOutputSequence[step] = gradOutput
local output = mlp:forward(input)
local output6 = mlp6:forward(input)
mytester:assertTensorEq(output, output6, 0.000001, "evaluation error "..step)
local output7 = mlp7:forward(input)
mytester:assertTensorEq(output, output7, 0.000001, "rho = nStep-1 forward error "..step)
table.insert(gradOutputs, gradOutput)
table.insert(outputs, output:clone())
if inputs then
inputs = {input, inputs}
else
inputs = input
end
end
local mlp5 = mlp:clone()
-- backward propagate through time (BPTT)
local gradInputs1 = {}
local gradInputs7 = {}
for step=nStep,1,-1 do
table.insert(gradInputs1, mlp:backward(inputSequence[step], gradOutputSequence[step]))
if step > 1 then -- rho = nStep - 1 : shouldn't update startModule
table.insert(gradInputs7, mlp7:backward(inputSequence[step], gradOutputSequence[step]))
end
end
local gradInput = gradInputs1[1]:clone()
mlp:forget() -- test ability to forget
mlp:zeroGradParameters()
local foutputs = {}
for step=1,nStep do
foutputs[step] = mlp:forward(inputSequence[step])
mytester:assertTensorEq(foutputs[step], outputs[step], 0.00001, "Recurrent forget output error "..step)
end
local fgradInput
for step=nStep,1,-1 do
fgradInput = mlp:backward(inputSequence[step], gradOutputs[step])
end
fgradInput = fgradInput:clone()
mytester:assertTensorEq(gradInput, fgradInput, 0.00001, "Recurrent forget gradInput error")
local mlp10 = mlp7:clone()
mlp10:forget()
mytester:assert(#mlp10.outputs == 0, 'forget outputs error')
local i = 0
for k,v in pairs(mlp10.sharedClones) do
i = i + 1
end
mytester:assert(i == 4, 'forget recurrentOutputs error')
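-- Build mlp2, a non-recurrent network equivalent to unrolling the RNN for
-- nStep steps: step 1 is input -> start -> transfer, and every later step
-- wraps the previous network in ParallelTable{input, previous -> feedback}
-- followed by CAddTable and the transfer, so mlp2 consumes the nested
-- `inputs` table built above.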
local mlp2 -- this one will simulate rho = nStep
local outputModules = {}
for step=1,nStep do
local inputModule_ = inputModule:sharedClone()
local outputModule = transferModule:clone()
table.insert(outputModules, outputModule)
if step == 1 then
local initialModule = nn.Sequential()
initialModule:add(inputModule_)
initialModule:add(startModule)
initialModule:add(outputModule)
mlp2 = initialModule
else
local parallelModule = nn.ParallelTable()
parallelModule:add(inputModule_)
local pastModule = nn.Sequential()
pastModule:add(mlp2)
local feedbackModule_ = feedbackModule:sharedClone()
pastModule:add(feedbackModule_)
parallelModule:add(pastModule)
local recurrentModule = nn.Sequential()
recurrentModule:add(parallelModule)
recurrentModule:add(nn.CAddTable())
recurrentModule:add(outputModule)
mlp2 = recurrentModule
end
end
local output2 = mlp2:forward(inputs)
mlp2:zeroGradParameters()
-- unlike mlp2, mlp8 will simulate rho = nStep -1
local mlp8 = mlp2:clone()
local inputModule8 = mlp8.modules[1].modules[1]
local m = mlp8.modules[1].modules[2].modules[1].modules[1].modules[2]
m = m.modules[1].modules[1].modules[2].modules[1].modules[1].modules[2]
local feedbackModule8 = m.modules[2]
local startModule8 = m.modules[1].modules[2] -- before clone
-- unshare the initialModule:
m.modules[1] = m.modules[1]:clone()
m.modules[2] = m.modules[2]:clone()
mlp8:backward(inputs, gradOutputs[#gradOutputs])
local gradInput2 = mlp2:backward(inputs, gradOutputs[#gradOutputs])
for step=1,nStep-1 do
gradInput2 = gradInput2[2]
end
mytester:assertTensorEq(gradInput, gradInput2, 0.000001, "recurrent gradInput")
mytester:assertTensorEq(outputs[#outputs], output2, 0.000001, "recurrent output")
for step=1,nStep do
local output, outputModule = outputs[step], outputModules[step]
mytester:assertTensorEq(output, outputModule.output, 0.000001, "recurrent output step="..step)
end
local mlp3 = nn.Sequential()
-- contains params and grads of mlp2 (the MLP version of the Recurrent)
mlp3:add(startModule):add(inputModule):add(feedbackModule)
local params2, gradParams2 = mlp3:parameters()
local params, gradParams = mlp:parameters()
mytester:assert(_.size(params2) == _.size(params), 'missing parameters')
mytester:assert(_.size(gradParams) == _.size(params), 'missing gradParameters')
mytester:assert(_.size(gradParams2) == _.size(params), 'missing gradParameters2')
for i,v in pairs(params) do
mytester:assertTensorEq(gradParams[i], gradParams2[i], 0.000001, 'gradParameter error ' .. i)
end
local mlp9 = nn.Sequential()
-- contains params and grads of mlp8
mlp9:add(startModule8):add(inputModule8):add(feedbackModule8)
local params9, gradParams9 = mlp9:parameters()
local params7, gradParams7 = mlp7:parameters()
mytester:assert(#_.keys(params9) == #_.keys(params7), 'missing parameters')
mytester:assert(#_.keys(gradParams7) == #_.keys(params7), 'missing gradParameters')
for i,v in pairs(params7) do
mytester:assertTensorEq(gradParams7[i], gradParams9[i], 0.00001, 'gradParameter error ' .. i)
end
mlp:updateParameters(0.1)
local params5 = mlp5:sparseParameters()
local params = mlp:sparseParameters()
for k,v in pairs(params) do
if params5[k] then
mytester:assertTensorNe(params[k], params5[k], 0.0000000001, 'backwardThroughTime error ' .. k)
end
end
end
function rnntest.Recurrent_oneElement()
-- test sequence of one element
local x = torch.rand(200)
local target = torch.rand(2)
local rho = 5
local hiddenSize = 100
-- RNN
local r = nn.Recurrent(
hiddenSize, nn.Linear(200,hiddenSize),
nn.Linear(hiddenSize, hiddenSize), nn.Sigmoid(),
rho
)
local seq = nn.Sequential()
seq:add(r)
seq:add(nn.Linear(hiddenSize, 2))
local criterion = nn.MSECriterion()
local output = seq:forward(x)
local err = criterion:forward(output,target)
local gradOutput = criterion:backward(output,target)
seq:backward(x,gradOutput)
seq:updateParameters(0.01)
end
function rnntest.Recurrent_TestTable()
-- Set up RNN where internal state is a table.
-- Trivial example is same RNN from rnntest.Recurrent test
-- but all layers are duplicated
local batchSize = 4
local inputSize = 10
local hiddenSize = 12
local outputSize = 7
local nStep = 10
local inputModule = nn.Linear(inputSize, outputSize)
local transferModule = nn.Sigmoid()
local learningRate = 0.1
-- test MLP feedback Module
local feedbackModule = nn.Sequential()
feedbackModule:add(nn.Linear(outputSize, hiddenSize))
feedbackModule:add(nn.Sigmoid())
feedbackModule:add(nn.Linear(hiddenSize, outputSize))
-- rho = nStep
local mlp = nn.Recurrent(
nn.ParallelTable()
:add(nn.Add(outputSize))
:add(nn.Add(outputSize)),
nn.ParallelTable()
:add(inputModule:clone())
:add(inputModule:clone()),
nn.ParallelTable()
:add(feedbackModule:clone())
:add(feedbackModule:clone()),
nn.ParallelTable()
:add(transferModule:clone())
:add(transferModule:clone()),
nStep,
nn.ParallelTable()
:add(nn.CAddTable())
:add(nn.CAddTable())
)
local input = torch.randn(batchSize, inputSize)
local err = torch.randn(batchSize, outputSize)
for i=1,nStep do
mlp:forward{input, input:clone()}
end
for i=nStep,1,-1 do
mlp:backward({input, input:clone()}, {err, err:clone()})
end
end
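-- rnntest.LSTM_main compares nn.LSTM against a manually unrolled stack of
-- clones of its recurrentModule that share weights, both for outputs and for
-- gradients w.r.t. inputs and parameters.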
function rnntest.LSTM_main()
local batchSize = math.random(1,2)
local inputSize = math.random(3,4)
local outputSize = math.random(5,6)
local nStep = 3
local input = {}
local gradOutput = {}
for step=1,nStep do
input[step] = torch.randn(batchSize, inputSize)
if step == nStep then
-- for the sake of keeping this unit test simple,
-- only the last step will get a gradient from the output
gradOutput[step] = torch.randn(batchSize, outputSize)
else
gradOutput[step] = torch.zeros(batchSize, outputSize)
end
end
local lstm = nn.LSTM(inputSize, outputSize)
-- we will use this to build an LSTM step by step (with shared params)
local lstmStep = lstm.recurrentModule:clone()
-- forward/backward through LSTM
local output = {}
lstm:zeroGradParameters()
for step=1,nStep do
output[step] = lstm:forward(input[step])
assert(torch.isTensor(input[step]))
end
local gradInputs = {}
for step=nStep,1,-1 do
gradInputs[step] = lstm:backward(input[step], gradOutput[step], 1)
end
local gradInput = gradInputs[1]
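-- Unroll the LSTM by hand: at each step the previous network is wrapped in
-- ParallelTable{Identity, mlp2} and flattened so the shared lstmStep clone
-- receives {x_t, h_(t-1), c_(t-1)}; the innermost step is fed zero h and c.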
local mlp2 -- this one will simulate rho = nStep
local inputs
for step=1,nStep do
-- iteratively build an LSTM out of non-recurrent components
local lstm = lstmStep:clone()
lstm:share(lstmStep, 'weight', 'gradWeight', 'bias', 'gradBias')
if step == 1 then
mlp2 = lstm
else
local rnn = nn.Sequential()
local para = nn.ParallelTable()
para:add(nn.Identity()):add(mlp2)
rnn:add(para)
rnn:add(nn.FlattenTable())
rnn:add(lstm)
mlp2 = rnn
end
-- prepare inputs for mlp2
if inputs then
inputs = {input[step], inputs}
else
inputs = {input[step], torch.zeros(batchSize, outputSize), torch.zeros(batchSize, outputSize)}
end
end
mlp2:add(nn.SelectTable(1)) --just output the output (not cell)
local output2 = mlp2:forward(inputs)
mlp2:zeroGradParameters()
local gradInput2 = mlp2:backward(inputs, gradOutput[nStep], 1) --/nStep)
mytester:assertTensorEq(gradInput2[2][2][1], gradInput, 0.00001, "LSTM gradInput error")
mytester:assertTensorEq(output[nStep], output2, 0.00001, "LSTM output error")
local params, gradParams = lstm:parameters()
local params2, gradParams2 = lstmStep:parameters()
mytester:assert(#params == #params2, "LSTM parameters error "..#params.." ~= "..#params2)
for i, gradParam in ipairs(gradParams) do
local gradParam2 = gradParams2[i]
mytester:assertTensorEq(gradParam, gradParam2, 0.000001,
"LSTM gradParam "..i.." error "..tostring(gradParam).." "..tostring(gradParam2))
end
gradParams = lstm.recursiveCopy(nil, gradParams)
gradInput = gradInput:clone()
mytester:assert(lstm.zeroTensor:sum() == 0, "zeroTensor error")
lstm:forget()
output = lstm.recursiveCopy(nil, output)
local output3 = {}
lstm:zeroGradParameters()
for step=1,nStep do
output3[step] = lstm:forward(input[step])
end
local gradInputs3 = {}
for step=nStep,1,-1 do