Skip to content

Commit fd5e9a2

Browse files
committed
update from julia 0.7 to 1.1
1 parent 9325cda commit fd5e9a2

5 files changed

+33
-23
lines changed

src/kernel_perceptron.jl

+2-2
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ end
4444
function trainer(model::KernelPerceptron{T},
4545
X::AbstractArray{T},
4646
Y::Vector{T}) where T<:AbstractFloat
47-
Y[Y .== 0] = -1 # fix in the future outside this function
47+
Y[Y .== 0] .= -1 # fix in the future outside this function
4848
max_epochs = model.max_epochs
4949
λ = model.λ # Lagrange multipliers
5050
K = ΦΦ(X,model.width) # computing the kernel gram matrix
@@ -100,7 +100,7 @@ function predictor(model::KernelPerceptron{T},
100100
y[i] = s
101101
end
102102
y = sign.(y)
103-
y[y .== -1] = 0 # fix in the future outside this function!!
103+
y[y .== -1] .= 0 # fix in the future outside this function!!
104104
return y
105105

106106
end

src/linear_perceptron.jl

+3-2
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
# use in linear perceptron
22
@inline h(Θ,x) = sinal(Θ'*x)
33

4+
import Random
45

56
function trainer(model::LinearPerceptron{T},
67
X::AbstractArray{T},
@@ -11,15 +12,15 @@ function trainer(model::LinearPerceptron{T},
1112
max_epochs = model.max_epochs
1213

1314
if random_state!=-1
14-
srand(random_state)
15+
Random.seed!(random_state)
1516
end
1617

1718
n,m = size(X)
1819
X = hcat(X,ones(n,1)) # adding bias
1920
history = []
2021
nerrors,nlast_errors = Inf,0
2122
epochs = 0
22-
Θ = rand(m+1) # already with bias
23+
Θ = Random.rand(m+1) # already with bias
2324
α = model.α # learning rate
2425
while nerrors>0 && epochs < max_epochs
2526
# stops when error is equal to zero or greater than last_error or reached max iterations

src/types.jl

+18-17
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
const MODEL_FILENAME = "perceptron_model.jld" # jld filename for storing the model
33
const MODEL_ID = "perceptron_model" # id of the model in the filesystem jld data
44

5-
using Statistics
5+
using Statistics
66

77
#### An abstract perceptron model
88
abstract type PerceptronModel{T} end
@@ -30,14 +30,14 @@ function LinearPerceptron(X::AbstractArray{T},
3030
centralize) where T<:AbstractFloat
3131

3232
return LinearPerceptron(alpha, # I will refactor to a constructor. Cleaner
33-
Vector{T}(1),
33+
Vector{T}(undef,1),
3434
shuffle_epoch,
3535
random_state,
3636
max_epochs,
3737
0,
38-
Vector{Integer}(1),
39-
mean(X,1),
40-
std(X,1),
38+
Vector{Integer}(undef,1),
39+
mean(X,dims=1),
40+
std(X,dims=1),
4141
centralize,
4242
size(X,2))
4343

@@ -76,9 +76,9 @@ function VotedPerceptron(X::AbstractArray{T},
7676
random_state,
7777
max_epochs,
7878
0,
79-
Vector{Integer}(1),
80-
mean(X,1),
81-
std(X,1),
79+
Vector{Integer}(undef,1),
80+
mean(X,dims=1),
81+
std(X,dims=1),
8282
centralize,
8383
size(X,2))
8484

@@ -113,15 +113,15 @@ function KernelPerceptron(X::AbstractArray{T},
113113
return KernelPerceptron(zeros(T,size(X,1)),
114114
max_epochs,
115115
0,
116-
Vector{Integer}(1),
117-
mean(X,1),
118-
std(X,1),
116+
Vector{Integer}(undef,1),
117+
mean(X,dims=1),
118+
std(X,dims=1),
119119
centralize,
120120
size(X,2),
121121
kernel,
122122
width,
123-
Vector{T}(1),
124-
Vector{T}(1))
123+
Vector{T}(undef,1),
124+
Vector{T}(undef,1))
125125

126126
end
127127

@@ -155,9 +155,9 @@ function AveragedPerceptron(X::AbstractArray{T},
155155
random_state,
156156
max_epochs,
157157
0,
158-
Vector{Integer}(1),
159-
mean(X,1),
160-
std(X,1),
158+
Vector{Integer}(undef,1),
159+
mean(X,dims=1),
160+
std(X,dims=1),
161161
centralize,
162162
size(X,2))
163163

@@ -169,7 +169,7 @@ end
169169

170170
## choosing types
171171
######################################################################################################
172-
function Model(X::AbstractArray{T},
172+
function Model(X,#::AbstractArray{T},
173173
alpha,
174174
shuffle_epoch,
175175
random_state,
@@ -179,6 +179,7 @@ function Model(X::AbstractArray{T},
179179
width,
180180
mode) where T<:AbstractFloat
181181

182+
println("size",size(X))
182183
if mode == "linear"
183184
return LinearPerceptron(X,
184185
alpha,

test/kernel_perceptron_test.jl

+5-1
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,10 @@ end
103103
X = [1.0 1.0; 0.0 1.0; 1.0 0.0; 0.0 0.0]
104104
Y = [1.0 ; -1; 0.0; 0.0]
105105

106-
try model = Perceptrons.fit(X,Y,mode="kernel",kernel="rbf",width=1.0) catch @test true end
106+
try
107+
model = Perceptrons.fit(X,Y,mode="kernel",kernel="rbf",width=1.0)
108+
catch
109+
@test true
110+
end
107111

108112
end

test/linear_perceptron_test.jl

+5-1
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,10 @@ end
7373
X = [1.0 1.0; 0.0 1.0; 1.0 0.0; 0.0 0.0]
7474
Y = [1.0 ; -1; 0.0; 0.0]
7575

76-
try model = Perceptrons.fit(X,Y) catch @test true end
76+
try
77+
model = Perceptrons.fit(X,Y)
78+
catch
79+
@test true
80+
end
7781

7882
end

0 commit comments

Comments
 (0)