
Merge pull request #6 from kjw0612/deep_supervised_rnn
Deep supervised rnn
kjw0612 committed Oct 11, 2015
2 parents 9bcb27c + 10f7339 commit deda607
Showing 4 changed files with 50 additions and 36 deletions.
rcn_dag.m (19 changes: 10 additions & 9 deletions)
@@ -27,9 +27,10 @@
opts.gpus = 2;
opts.resid = 1;
opts.recursive = 1;
-opts.dropout = 1;
+opts.dropout = 0;
opts.depth = 10; % 10 optimal5
-opts.filterSize = 512;
+opts.filterSize = 64;
+if opts.dropout, opts.filterSize = opts.filterSize * 8; end
opts.pad = 0;
opts.useBnorm = false;
exp_name = 'exp';
@@ -47,7 +48,7 @@
exp_name = strcat(exp_name, '_N', num2str(problem.v));
end
end
-exp_name = sprintf('%s_resid%d_depth%d', exp_name, opts.resid, opts.depth);
+exp_name = sprintf('multi_obj_%s_resid%d_depth%d', exp_name, opts.resid, opts.depth);
opts.expDir = fullfile('data','exp',exp_name);
opts.dataDir = fullfile('data', '91');
opts.imdbPath = fullfile(opts.expDir, 'imdb.mat');
@@ -57,7 +58,7 @@
if opts.dropout, rep = rep * 5; end
opts.train.learningRate = [0.1*ones(1,rep) 0.01*ones(1,rep) 0.001*ones(1,rep) 0.0001*ones(1,rep)];%*0.99 .^ (0:500);
opts.train.numEpochs = numel(opts.train.learningRate);
-opts.train.continue = 1;
+opts.train.continue = 0;
opts.train.gradRange = 1e-4;
opts.train.sync = true;
opts.train.expDir = opts.expDir;
@@ -73,15 +74,15 @@
% Prepare data
% --------------------------------------------------------------------

-if exist(opts.imdbPath, 'file')
+if exist(opts.imdbPath, 'file') & 0
imdb = load(opts.imdbPath) ;
else
imdb = getRcnImdb(opts.dataDir, opts.problems, opts.depth, opts.pad, opts.resid);
mkdir(opts.expDir) ;
-save(opts.imdbPath, '-struct', 'imdb') ;
+%save(opts.imdbPath, '-struct', 'imdb') ;
end

-net = rcn_init_dag(opts);
+[net, opts.train.derOutputs] = rcn_init_dag(opts);
net.initParams();
%net = dagnn.DagNN.fromSimpleNN(net) ;
%net.addLayer('error', dagnn.Loss('loss', 'classerror'), ...
@@ -108,8 +109,8 @@

function imdb = getRcnImdb(dataDir, problems, depth, pad, diff)
f_lst = dir(fullfile(dataDir, '*.*'));
-ps = (2*depth+1); % patch size
-stride = ps;%ps - 2*pad;
+ps = 2*depth+1; % patch size
+stride = ps;%31;%ps - 2*pad;

nPatches = 0;
for f_iter = 1:numel(f_lst)
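The rcn_dag.m changes above retune the experiment defaults: dropout is disabled, the filter count drops from 512 to 64 (scaled back up by 8 when dropout is on), checkpoint resuming is turned off, and the exist(...) & 0 short-circuit together with the commented-out save forces the patch database to be rebuilt on every run. The structural change is that rcn_init_dag now also returns the list of supervised loss outputs, which is stored in opts.train.derOutputs. A minimal sketch of how a MatConvNet DagNN trainer consumes such name/weight pairs during backpropagation; the weights shown here are illustrative, not the committed values:

% Each loss output name is paired with a scalar derivative weight;
% eval runs the forward pass and backpropagates all weighted losses.
derOutputs = {'objective', 1, 'objective2', 0.02, 'objective3', 0.03};
net.eval({'input', im, 'label', gt}, derOutputs);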
rcn_experiments.m (47 changes: 21 additions & 26 deletions)
@@ -1,38 +1,33 @@
-%% Batch normalization effect experiment
+%% Experiment Framework

-[net_bn, info_bn] = rcn(...
-'useBnorm', true);
-
-[net_fc, info_fc] = rcn(...
-'useBnorm', false);
+net = {};
+info = {};
+exp_name = {};
+for i = 1:9
+[net{end+1}, info{end+1}] = rcn_dag('filterSize', 2^i);
+exp_name{end+1} = sprintf('filterSize %d (no dropout)', 2^i);
+end

%%
figure(1) ; clf ;
subplot(1,2,1) ;
-semilogy(info_fc.val.objective, 'k') ; hold on ;
-semilogy(info_bn.val.objective, 'b') ;
-xlabel('Training samples [x10^3]'); ylabel('energy') ;
+for i = 1:numel(net)
+val = zeros(1,numel(info{1}.val));
+val(:) = info{i}.val.objective;
+plot(val) ; hold on ;
+end
+xlabel('Training samples [x10^3]'); ylabel('objective (val)') ;
grid on ;
-h=legend('BASE', 'BNORM') ;
-set(h,'color','none');
+h=legend(exp_name) ;
title('objective') ;

-subplot(1,2,2) ;
-nProblem = numel(info_fc.test.error);
-base = info_fc.test.error{1}.ours;
-bnorm = info_bn.test.error{1}.ours;
-for problem_iter = 2:nProblem
-base = base + info_fc.test.error{problem_iter}.ours;
-bnorm = bnorm + info_bn.test.error{problem_iter}.ours;
-end
-base = base / nProblem;
-bnorm = bnorm / nProblem;
-
-plot(base, 'k') ; hold on ; % first row for top1e
-plot(bnorm, 'b') ;
-h=legend('BASE','BNORM') ;
+subplot(1,2,2);
+for i =1:numel(net)
+plot(info{i}.test) ; hold on ;
+end
+h=legend(exp_name, 'location', 'southeast') ;
grid on ;
xlabel('Training samples [x10^3]'); ylabel('error') ;
set(h,'color','none') ;
-title('error') ;
+title('PSNR') ;
drawnow ;
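The rewritten rcn_experiments.m replaces the one-off batch-normalization comparison with a sweep over filterSize = 2^1 through 2^9, storing each run's network, training info, and legend label, then plotting the validation objectives and test PSNR curves of all runs together. One idiom worth noting: the zeros-then-assign pair inside the plotting loop flattens a struct array field, which MATLAB can do in a single step. A sketch, assuming info{i}.val is a struct array with one scalar objective per epoch:

% Equivalent one-liner for the zeros(...) / val(:) = ... pair above:
val = [info{i}.val.objective];  % concatenates the field across epochs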
rcn_init_dag.m (17 changes: 16 additions & 1 deletion)
@@ -1,4 +1,4 @@
-function net = rcn_init_dag(opts)
+function [net, derOutputs] = rcn_init_dag(opts)
% define net
net = dagnn.DagNN() ;

@@ -28,6 +28,15 @@
net.addLayer(['bnorm',num2str(i)], dagnn.BatchNorm(), {['x',num2str(x)]}, {['x',num2str(x+1)]}, {}) ;
x = x + 1;
end

+if i < opts.depth - 1
+init = [0.001, 0.5];
+if opts.resid, init(2)=0; end
+convBlock = dagnn.Conv('size', [3,3,opts.filterSize,1], 'hasBias', true, 'init', init, 'pad', 1);
+net.addLayer(sprintf('conv_out%d',i), convBlock, {sprintf('x%d',x)}, {sprintf('prediction%d',i)}, {['filters',num2str(opts.depth)], ['biases',num2str(opts.depth)]});
+net.addLayer(sprintf('objective%d',i), dagnn.EuclidLoss(), ...
+{sprintf('prediction%d',i),'label'}, sprintf('objective%d',i)) ;
+end
end
init = [0.001, 0.5];
if opts.resid, init(2)=0; end
@@ -36,3 +45,9 @@

net.addLayer('objective', dagnn.EuclidLoss(), ...
{'prediction','label'}, 'objective') ;

+derOutputs = {'objective', 1};
+for i=2:opts.depth-2
+derOutputs{end+1}=sprintf('objective%d',i);
+derOutputs{end+1}=i/100;
+end
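This hunk is the heart of the deep-supervision change: every intermediate recursion i (up to opts.depth - 2) gets its own reconstruction layer conv_out%d and Euclidean loss objective%d. Because each conv_out layer references the same parameter names as the final output convolution (filtersN and biasesN with N = opts.depth), DagNN shares a single set of reconstruction weights across all supervision heads. The derOutputs list then turns training into a weighted multi-loss objective; note it starts at i = 2, so an objective1 output, if the enclosing layer loop creates one, receives no gradient. A sketch of the scalar loss those weights encode, using a hypothetical vector lossVal in which lossVal(i) holds the value of objective_i and lossVal(end) the final objective:

% Combined training objective implied by derOutputs:
total = lossVal(end);                    % final objective, weight 1
for i = 2:opts.depth-2
    total = total + (i/100)*lossVal(i);  % deeper heads get larger weight
end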
rcn_train_dag.m (3 changes: 3 additions & 0 deletions)
@@ -110,7 +110,10 @@
end

if numel(opts.gpus)>0, net.move('gpu'); end
+backupmode = net.mode;
+net.mode = 'test';
[baseline_psnr, stats.test(epoch)] = evalTest(epoch, opts, net);
+net.mode = backupmode;
net.reset();
if numel(opts.gpus)>0, net.move('cpu'); end

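The training loop now flips the network into test mode around the per-epoch evaluation, so layers with distinct inference behaviour (DropOut, BatchNorm) take their test-time paths and the reported PSNR reflects deterministic deployment output rather than the stochastic training forward pass. The same save/restore pattern with explanatory comments (names as in the diff; 'normal' is DagNN's usual training mode):

backupmode = net.mode;    % usually 'normal' while training
net.mode = 'test';        % DropOut passes through; BatchNorm uses stored moments
[baseline_psnr, stats.test(epoch)] = evalTest(epoch, opts, net);
net.mode = backupmode;    % restore the training-time behaviour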
