Merged
Changes from 3 commits
1 change: 1 addition & 0 deletions example/CMakeLists.txt
@@ -6,6 +6,7 @@ foreach(execid
get_set_network_params
simple
sine
quadratic
)
add_executable(${execid} ${execid}.f90)
target_link_libraries(${execid} PRIVATE
151 changes: 151 additions & 0 deletions example/quadratic.f90
@@ -0,0 +1,151 @@
program quadratic_fit
use nf, only: dense, input, network
implicit none
type(network) :: net
real, dimension(:), allocatable :: x, y
integer, parameter :: num_iterations = 100000
integer, parameter :: test_size = 30
real, dimension(:), allocatable :: xtest, ytest, ypred
integer :: i, batch_size
real :: learning_rate

print '("Fitting quadratic function")'
print '(60("="))'

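! A small dense network: one input, a hidden layer of 3 neurons, and one output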
net = network([ &
input(1), &
dense(3), &
dense(1) &
])

call net % print_info()

allocate(xtest(test_size), ytest(test_size), ypred(test_size))
! Test set: evenly spaced points on [0, 2)
xtest = [(real(i - 1) * 2 / test_size, i = 1, test_size)]
ytest = (xtest**2 / 2 + xtest / 2 + 1) / 2

! Training set: one random sample per training iteration
allocate(x(num_iterations), y(num_iterations))

! Generate the training data: x uniform on [0, 2), y the target quadratic
do i = 1, num_iterations
call random_number(x(i))
x(i) = x(i) * 2
y(i) = (x(i)**2 / 2 + x(i) / 2 + 1) / 2
end do

! Set the learning rate and mini-batch size
learning_rate = 0.01
batch_size = 10

! Train the same network with each optimizer in turn

! Plain SGD: update parameters after every sample
call sgd_optimizer(net, x, y, learning_rate, num_iterations)

! Batch gradient descent: one update per pass over the full dataset
call batch_sgd_optimizer(net, x, y, learning_rate, num_iterations)

! Mini-batch SGD: one update per batch of batch_size samples
call minibatch_sgd_optimizer(net, x, y, learning_rate, num_iterations, batch_size)

! Calculate predictions on the test set
ypred = [(net % predict([xtest(i)]), i = 1, test_size)]

! Report the mean squared error on the test set
print '("Test set MSE after ",i0," iterations: ",f9.6)', num_iterations, sum((ypred - ytest)**2) / size(ypred)

deallocate(x, y, xtest, ytest, ypred)

! Optimizer routines are internal procedures so that their assumed-shape
! dummy arguments have the explicit interfaces Fortran requires
contains


subroutine sgd_optimizer(net, x, y, learning_rate, num_iterations)
! Stochastic gradient descent: one fresh sample and one parameter
! update per iteration (x and y hold one sample per iteration)
type(network), intent(inout) :: net
real, dimension(:), intent(in) :: x, y
real, intent(in) :: learning_rate
integer, intent(in) :: num_iterations
integer :: i

do i = 1, num_iterations
call net % forward([x(i)])
call net % backward([y(i)])
! Update immediately after each sample
call update_parameters(net, learning_rate)
end do
end subroutine sgd_optimizer


subroutine batch_sgd_optimizer(net, x, y, learning_rate, num_iterations)
! Batch gradient descent: accumulate gradients over the whole dataset,
! then update the parameters once per iteration. The backward pass
! accumulates gradients between updates, so no separate
! accumulate_gradients step is needed.
type(network), intent(inout) :: net
real, dimension(:), intent(in) :: x, y
real, intent(in) :: learning_rate
integer, intent(in) :: num_iterations
integer :: i, n

do n = 1, num_iterations
do i = 1, size(x)
! The input layer has size 1, so samples are fed one at a time
call net % forward([x(i)])
call net % backward([y(i)])
end do
! Scale the learning rate by the dataset size to apply the mean gradient
call update_parameters(net, learning_rate / size(x))
end do
end subroutine batch_sgd_optimizer


subroutine minibatch_sgd_optimizer(net, x, y, learning_rate, num_iterations, batch_size)
! Mini-batch SGD: accumulate gradients over batch_size consecutive
! samples, then update the parameters once per batch
type(network), intent(inout) :: net
real, dimension(:), intent(in) :: x, y
real, intent(in) :: learning_rate
integer, intent(in) :: num_iterations, batch_size
integer :: i, j, n, num_batches, start_index, end_index
real, dimension(:), allocatable :: batch_x, batch_y

num_batches = size(x) / batch_size

allocate(batch_x(batch_size), batch_y(batch_size))

do n = 1, num_iterations
do i = 1, num_batches
! Select the next non-overlapping batch
start_index = (i - 1) * batch_size + 1
end_index = i * batch_size
batch_x = x(start_index:end_index)
batch_y = y(start_index:end_index)

! Accumulate gradients over the batch, one sample at a time
do j = 1, batch_size
call net % forward([batch_x(j)])
call net % backward([batch_y(j)])
end do
! Scale the learning rate by the batch size to apply the mean gradient
call update_parameters(net, learning_rate / batch_size)
end do
end do

deallocate(batch_x, batch_y)
end subroutine minibatch_sgd_optimizer


! Draft of a manual parameter update, kept commented out for now.
! num_layers and get_layer are placeholder accessors that the network
! type does not provide yet, and the weight and bias gradients (dw and
! db in the dense layer type) have different shapes, so they cannot
! both come from a single gradients array as in the first draft.

! subroutine update_parameters(net, learning_rate)
! type(network), intent(inout) :: net
! real, intent(in) :: learning_rate
! integer :: i, num_layers
! type(dense_layer) :: layer

! num_layers = net % num_layers()

! do i = 1, num_layers
! layer = net % get_layer(i)
! layer % weights = layer % weights - learning_rate * layer % dw
! layer % biases = layer % biases - learning_rate * layer % db
! end do
! end subroutine update_parameters
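
! The optimizers above call update_parameters, which the commented-out
! draft does not yet provide. Below is a minimal working stand-in; it
! assumes the update method that recent neural-fortran releases expose
! on the network type, which applies the gradients accumulated by
! backward and then resets them. That method is an assumption about the
! library API, not something introduced by this patch.
subroutine update_parameters(net, learning_rate)
! Delegate to the network's own update method (assumed API)
type(network), intent(inout) :: net
real, intent(in) :: learning_rate
call net % update(learning_rate)
end subroutine update_parameters

end program quadratic_fit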