%% Convolutional Neural Network Exercise
% Instructions
% ------------
%
% This file contains code that helps you get started in building a
% single-layer convolutional neural network. In this exercise, you will
% only need to modify cnnCost.m and minFuncSGD.m. You will not need to
% modify this file.
%%======================================================================
%% STEP 0: Initialize Parameters and Load Data
% Here we initialize some parameters used for the exercise.
imageDim = 28;   % images are imageDim x imageDim
numClasses = 62; % Number of classes (the 62 Chars74K character classes:
                 % digits 0-9 plus upper- and lower-case letters)
filterDim = 5;   % Filter size for conv layer
numFilters = 50; % Number of filters for conv layer
poolDim = 2;     % Pooling dimension (should divide imageDim-filterDim+1)
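% Sanity check: non-overlapping mean pooling assumes poolDim evenly divides
% the convolved output dimension (imageDim - filterDim + 1), so fail fast
% here rather than deep inside cnnCost.m.
convDim = imageDim - filterDim + 1;   % 28 - 5 + 1 = 24
assert(mod(convDim, poolDim) == 0, ...
    'poolDim must evenly divide imageDim - filterDim + 1');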
% Load images. importC74k is assumed to populate images, labels,
% testImages, and testLabels, which are used below.
importC74k;
% Initialize Parameters
theta = cnnInitParams(imageDim,filterDim,numFilters,poolDim,numClasses);
%%======================================================================
%% STEP 1: Implement convNet Objective
% Implement the function cnnCost.m.
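% For orientation, a rough sketch of the forward pass cnnCost.m is expected
% to compute, under the assumed architecture (convolution -> sigmoid ->
% mean pooling -> softmax with cross-entropy cost):
%
%   convDim   = imageDim - filterDim + 1;   % 24 with the defaults above
%   outputDim = convDim / poolDim;          % 12 with the defaults above
%   % For each image i and filter f (filter rotated so conv2 performs
%   % correlation):
%   %   act = sigmoid(conv2(images(:,:,i), rot90(Wc(:,:,f),2), 'valid') + bc(f));
%   %   pooled = conv2(act, ones(poolDim)/poolDim^2, 'valid');
%   %   pooled = pooled(1:poolDim:end, 1:poolDim:end);   % subsample
%   % Then softmax over Wd * pooledFeatures(:) + bd gives class
%   % probabilities, and the cost is the cross-entropy of the true labels.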
%%======================================================================
%% STEP 2: Gradient Check
% Use the file computeNumericalGradient.m to check the gradient
% calculation for your cnnCost.m function. You may need to add the
% appropriate path or copy the file to this directory.
DEBUG = false;  % set this to true to check gradient
if DEBUG
    % To speed up gradient checking, we will use a reduced network and
    % a debugging data set
    db_numFilters = 2;
    db_filterDim = 9;
    db_poolDim = 5;
    db_images = images(:,:,1:10);
    db_labels = labels(1:10);
    db_theta = cnnInitParams(imageDim,db_filterDim,db_numFilters,...
                             db_poolDim,numClasses);

    [cost, grad] = cnnCost(db_theta,db_images,db_labels,numClasses,...
                           db_filterDim,db_numFilters,db_poolDim);

    % Check gradients
    numGrad = computeNumericalGradient( @(x) cnnCost(x,db_images,...
                                        db_labels,numClasses,db_filterDim,...
                                        db_numFilters,db_poolDim), db_theta);

    % Use this to visually compare the gradients side by side
    disp([numGrad grad]);

    diff = norm(numGrad-grad)/norm(numGrad+grad);
    % Should be small. In our implementation, these values are usually
    % less than 1e-9.
    disp(diff);

    assert(diff < 1e-9,...
        'Difference too large. Check your gradient computation again');
end
%%======================================================================
%% STEP 3: Learn Parameters
% Implement minFuncSGD.m, then train the model.
options.epochs = 3;
options.minibatch = 256;
options.alpha = 1e-1;
options.momentum = .95;
opttheta = minFuncSGD(@(x,y,z) cnnCost(x,y,z,numClasses,filterDim,...
                      numFilters,poolDim), theta, images, labels,...
                      testImages, testLabels, numClasses, filterDim,...
                      numFilters, poolDim, options);
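% For reference, minFuncSGD.m is expected to apply a momentum update of
% roughly this form (a sketch, not the required implementation; the actual
% file may also anneal alpha and shuffle minibatches each epoch):
%
%   velocity = momentum * velocity + alpha * grad;
%   theta    = theta - velocity;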
%%======================================================================
%% STEP 4: Test
% Test the performance of the trained model on the held-out test set. For
% reference, the original 10-class MNIST version of this exercise reaches
% above 97% accuracy (around 97.4%) after 3 epochs of training.
[~,cost,preds] = cnnCost(opttheta,testImages,testLabels,numClasses,...
                         filterDim,numFilters,poolDim,true);

acc = (sum(preds==testLabels)/length(preds))*100;
fprintf('Accuracy is %f percent\n',acc);
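% Optional: per-class accuracy, which can be more informative than a single
% aggregate number with 62 classes (e.g. visually confusable characters such
% as 0/O or 1/l). This assumes preds and testLabels are vectors of class
% indices in 1..numClasses.
confMat = accumarray([testLabels(:) preds(:)], 1, [numClasses numClasses]);
perClassAcc = diag(confMat) ./ max(sum(confMat,2), 1);
fprintf('Worst per-class accuracy: %f percent\n', min(perClassAcc)*100);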