Skip to content

Commit

Permalink
Update for 0.4.7
Browse files Browse the repository at this point in the history
  • Loading branch information
SkyWorld117 committed Jun 6, 2021
1 parent 26c7cfb commit f5f9e4f
Show file tree
Hide file tree
Showing 3 changed files with 30 additions and 35 deletions.
2 changes: 1 addition & 1 deletion Project.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
name = "DianoiaML"
uuid = "0bf644f3-9992-48f5-8d6e-b7168dcf6b06"
authors = ["Yi Zhu, @SkyWorld117"]
version = "0.4.6"
version = "0.4.7"

[deps]
HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
Expand Down
6 changes: 5 additions & 1 deletion UPDATES.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,9 @@
# Update Log

### Update 0.4.7 - 06.06.2021
- Fixed the UI of **GA**.
- Greatly optimized **GA** (about four times faster).

### Update 0.4.6 - 06.06.2021
- Fixed **Minibatch_GD** and **GA**.
- Slightly optimized **GA**.
Expand Down Expand Up @@ -85,4 +89,4 @@
- Added **Adam** and **AdaBelief** as optimizers.
- Added **One Hot** and **Flatten** as tools.
- Improved the structures.
- Known issues: Convolutional2D requires a lot of RAM and is relatively slow.
- Known issues: Convolutional2D requires a lot of RAM and is relatively slow.
57 changes: 24 additions & 33 deletions src/optimizer/ga.jl
Original file line number Diff line number Diff line change
Expand Up @@ -32,12 +32,12 @@ module GA
print("=")
end

@time for i in 1:gene_pool
for i in 1:gene_pool
models[i].activate(models[i], current_input_data)
losses[i] = monitor.func(models[i].layers[end-1].output, current_output_data)
end

@time for i in 1:gene_pool-num_copy
for i in 1:gene_pool-num_copy
get_weights!(weights, losses)
recomutation!(models[argmax(losses)], models[sample(weights)], models[sample(weights)], α, t, batch_size-mini_batch+1)
losses[argmax(losses)] = Inf32
Expand Down Expand Up @@ -79,39 +79,30 @@ module GA

function recomutation!(new_model, model₁, model₂, α, t, T)
for i in 1:model₁.num_layer
if hasproperty(model₁.layers[i], :filters) && hasproperty(model₁.layers[i], :biases)
@batch for j in eachindex(model₁.layers[i].filters)
if rand()<=α
new_model.layers[i].filters[j] = rand(min(model₁.layers[i].filters[j], model₂.layers[i].filters[j]):1.0f-3:max(model₁.layers[i].filters[j], model₂.layers[i].filters[j]))
new_model.layers[i].filters[j] += mutation_func(t, new_model.layers[i].filters[j], T)
else
new_model.layers[i].filters[j] = rand(min(model₁.layers[i].filters[j], model₂.layers[i].filters[j]):1.0f-3:max(model₁.layers[i].filters[j], model₂.layers[i].filters[j]))
end
end
@batch for j in eachindex(model₁.layers[i].biases)
if rand()<=α
new_model.layers[i].biases[j] = rand(min(model₁.layers[i].biases[j], model₂.layers[i].biases[j]):1.0f-3:max(model₁.layers[i].biases[j], model₂.layers[i].biases[j]))
new_model.layers[i].biases[j] += mutation_func(t, new_model.layers[i].biases[j], T)
else
new_model.layers[i].biases[j] = rand(min(model₁.layers[i].biases[j], model₂.layers[i].biases[j]):1.0f-3:max(model₁.layers[i].biases[j], model₂.layers[i].biases[j]))
end
if hasproperty(new_model.layers[i], :filters)
@avxt temp = model₂.layers[i].filters .- model₁.layers[i].filters
rand!(local_rng(), new_model.layers[i].filters, VectorizedRNG.StaticInt(0), model₁.layers[i].filters, temp)
rand!(local_rng(), temp)
@avxt for j in eachindex(temp)
new_model.layers[i].filters[j] = ifelse(temp[j]<=α, ifelse(temp[j]<=0.5, (1.0f0-new_model.layers[i].filters[j])*(1.0f0-temp[j]^(1-t/T)^5), -(new_model.layers[i].filters[j]+1.0f0)*(1.0f0-temp[j]^(1-t/T)^5)), new_model.layers[i].filters[j])
end
elseif hasproperty(model₁.layers[i], :weights) && hasproperty(model₁.layers[i], :biases)
@batch for j in eachindex(model₁.layers[i].weights)
if rand()<=α
new_model.layers[i].weights[j] = rand(min(model₁.layers[i].weights[j], model₂.layers[i].weights[j]):1.0f-3:max(model₁.layers[i].weights[j], model₂.layers[i].weights[j]))
new_model.layers[i].weights[j] += mutation_func(t, new_model.layers[i].weights[j], T)
else
new_model.layers[i].weights[j] = rand(min(model₁.layers[i].weights[j], model₂.layers[i].weights[j]):1.0f-3:max(model₁.layers[i].weights[j], model₂.layers[i].weights[j]))
end
end

if hasproperty(new_model.layers[i], :weights)
@avxt temp = model₂.layers[i].weights .- model₁.layers[i].weights
rand!(local_rng(), new_model.layers[i].weights, VectorizedRNG.StaticInt(0), model₁.layers[i].weights, temp)
rand!(local_rng(), temp)
@avxt for j in eachindex(temp)
new_model.layers[i].weights[j] = ifelse(temp[j]<=α, ifelse(temp[j]<=0.5, (1.0f0-new_model.layers[i].weights[j])*(1.0f0-temp[j]^(1-t/T)^5), -(new_model.layers[i].weights[j]+1.0f0)*(1.0f0-temp[j]^(1-t/T)^5)), new_model.layers[i].weights[j])
end
@batch for j in eachindex(model₁.layers[i].biases)
if rand()<=α
new_model.layers[i].biases[j] = rand(min(model₁.layers[i].biases[j], model₂.layers[i].biases[j]):1.0f-3:max(model₁.layers[i].biases[j], model₂.layers[i].biases[j]))
new_model.layers[i].biases[j] += mutation_func(t, new_model.layers[i].biases[j], T)
else
new_model.layers[i].biases[j] = rand(min(model₁.layers[i].biases[j], model₂.layers[i].biases[j]):1.0f-3:max(model₁.layers[i].biases[j], model₂.layers[i].biases[j]))
end
end

if hasproperty(new_model.layers[i], :biases)
@avxt temp = model₂.layers[i].biases .- model₁.layers[i].biases
rand!(local_rng(), new_model.layers[i].biases, VectorizedRNG.StaticInt(0), model₁.layers[i].biases, temp)
rand!(local_rng(), temp)
@avxt for j in eachindex(temp)
new_model.layers[i].biases[j] = ifelse(temp[j]<=α, ifelse(temp[j]<=0.5, (1.0f0-new_model.layers[i].biases[j])*(1.0f0-temp[j]^(1-t/T)^5), -(new_model.layers[i].biases[j]+1.0f0)*(1.0f0-temp[j]^(1-t/T)^5)), new_model.layers[i].biases[j])
end
end
end
Expand Down

0 comments on commit f5f9e4f

Please sign in to comment.