-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathlinear_perceptron.jl
69 lines (60 loc) · 1.72 KB
/
linear_perceptron.jl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
# Hypothesis function for the linear perceptron: thresholded linear combination
# of weights Θ (bias included as the last component) and augmented sample x.
# `sinal` ("sign" in Portuguese) is defined elsewhere in this package —
# presumably returns the sign of its argument; TODO confirm at call site.
@inline h(Θ,x) = sinal(Θ'*x)
import Random
function trainer(model::LinearPerceptron{T},
X::AbstractArray{T},
Y::Vector{T}) where T<:AbstractFloat
shuffle_epoch = model.shuffle_epoch
random_state = model.random_state
max_epochs = model.max_epochs
if random_state!=-1
Random.seed!(random_state)
end
n,m = size(X)
X = hcat(X,ones(n,1)) # adding bias
history = []
nerrors,nlast_errors = Inf,0
epochs = 0
Θ = Random.rand(m+1) # already with bias
α = model.α # learning rate
while nerrors>0 && epochs < max_epochs
# stops when error is equal to zero or grater than last_error or reached max iterations
# shuffle dataset
if shuffle_epoch
sind = Random.shuffle(1:n)
x = X[sind,:]
y = Y[sind]
end
nerrors = 0
# weight updates for all samples
for i=1:n
xi = x[i,:]
ξ = h(Θ,xi) - y[i]
if ξ!=0
nerrors+=1
Θ = Θ - α * ξ * xi
end
end
nlast_errors = nerrors
epochs+=1
push!(history,nerrors)
end
if nerrors > 0
warn("Perceptron: Not converged. Max epochs $(max_epochs) reached. Error history: $(history) \n Try to increase max_epochs or may be you have a non linear problem.")
end
model.Θ = Θ
model.α = α
model.history = history
end
function predictor(model::LinearPerceptron{T},
X::AbstractArray{T}) where T<:AbstractFloat
Θ = model.Θ
α = model.α
n,m = size(X)
y = zeros(Real,n)
X = hcat(X,ones(n,1)) # adding bias
for i=1:n
y[i] = h(Θ,X[i,:])
end
y
end