diff --git a/example/cnn_mnist.f90 b/example/cnn_mnist.f90
index e8f6f0a0..fecb7deb 100644
--- a/example/cnn_mnist.f90
+++ b/example/cnn_mnist.f90
@@ -11,9 +11,6 @@ program cnn_mnist
   real, allocatable :: training_images(:,:), training_labels(:)
   real, allocatable :: validation_images(:,:), validation_labels(:)
   real, allocatable :: testing_images(:,:), testing_labels(:)
-  real, allocatable :: input_reshaped(:,:,:,:)
-  real :: acc
-  logical :: ok
   integer :: n
   integer, parameter :: num_epochs = 10
 
diff --git a/example/get_set_network_params.f90 b/example/get_set_network_params.f90
index c4a5f980..bf9613b6 100644
--- a/example/get_set_network_params.f90
+++ b/example/get_set_network_params.f90
@@ -9,8 +9,7 @@ program get_set_network_params
   integer, parameter :: test_size = 30
   real :: xtest(test_size), ytest(test_size)
   real :: ypred1(test_size), ypred2(test_size)
-  integer :: i, n, nparam
-  real, allocatable :: parameters(:)
+  integer :: i, n
 
   print '("Getting and setting network parameters")'
   print '(60("="))'
diff --git a/example/quadratic.f90 b/example/quadratic.f90
index 9bd4cdc3..30303ad8 100644
--- a/example/quadratic.f90
+++ b/example/quadratic.f90
@@ -24,7 +24,7 @@ program quadratic_fit
   real, allocatable :: x(:), y(:) ! training data
   real, allocatable :: xtest(:), ytest(:) ! testing data
 
-  integer :: i, n
+  integer :: i
 
   print '("Fitting quadratic function")'
   print '(60("="))'
@@ -277,7 +277,7 @@ subroutine rmsprop_optimizer( &
     real, intent(in) :: xtest(:), ytest(:)
     real, intent(in) :: learning_rate, decay_rate
     integer, intent(in) :: num_epochs
-    integer :: i, j, n
+    integer :: i, n
     real, allocatable :: ypred(:)
 
     print '(a)', 'RMSProp optimizer'
@@ -446,4 +446,4 @@ subroutine shuffle(arr)
     end do
   end subroutine shuffle
 
-end program quadratic_fit
\ No newline at end of file
+end program quadratic_fit
diff --git a/src/nf/nf_network_submodule.f90 b/src/nf/nf_network_submodule.f90
index 59abcf7d..2e9c76d6 100644
--- a/src/nf/nf_network_submodule.f90
+++ b/src/nf/nf_network_submodule.f90
@@ -566,11 +566,10 @@ module subroutine train(self, input_data, output_data, batch_size, &
     integer, intent(in) :: epochs
     class(optimizer_base_type), intent(in), optional :: optimizer
     class(loss_type), intent(in), optional :: loss
-    class(optimizer_base_type), allocatable :: optimizer_
 
     real :: pos
     integer :: dataset_size
-    integer :: batch_start, batch_end
+    integer :: batch_start
     integer :: i, j, n
     integer :: istart, iend, indices(2)
 
@@ -600,11 +599,9 @@ module subroutine train(self, input_data, output_data, batch_size, &
       ! Pull a random mini-batch from the dataset
       call random_number(pos)
       batch_start = int(pos * (dataset_size - batch_size + 1)) + 1
-      batch_end = batch_start + batch_size - 1
 
       ! FIXME shuffle in a way that doesn't require co_broadcast
       call co_broadcast(batch_start, 1)
-      call co_broadcast(batch_end, 1)
 
       ! Distribute the batch in nearly equal pieces to all images
      indices = tile_indices(batch_size)
@@ -628,7 +625,6 @@ module subroutine update(self, optimizer, batch_size)
     class(network), intent(in out) :: self
     class(optimizer_base_type), intent(in), optional :: optimizer
     integer, intent(in), optional :: batch_size
-    class(optimizer_base_type), allocatable :: optimizer_
     integer :: batch_size_
     real, allocatable :: params(:)
     integer :: n
diff --git a/test/test_flatten_layer.f90 b/test/test_flatten_layer.f90
index cc780acd..70641c9c 100644
--- a/test/test_flatten_layer.f90
+++ b/test/test_flatten_layer.f90
@@ -9,7 +9,7 @@ program test_flatten_layer
   type(layer) :: test_layer, input_layer
   type(network) :: net
 
-  real, allocatable :: input_data(:,:,:), gradient(:,:,:)
+  real, allocatable :: gradient(:,:,:)
   real, allocatable :: output(:)
 
   logical :: ok = .true.
diff --git a/test/test_get_set_network_params.f90 b/test/test_get_set_network_params.f90
index b9cc24b1..3e285f3c 100644
--- a/test/test_get_set_network_params.f90
+++ b/test/test_get_set_network_params.f90
@@ -3,7 +3,6 @@ program test_get_set_network_params
   use nf, only: conv2d, dense, flatten, input, maxpool2d, network
   implicit none
   type(network) :: net
-  integer :: n
   logical :: ok = .true.
   real :: test_params_dense(8) = [1, 2, 3, 4, 5, 6, 7, 8]
   real :: test_params_conv2d(10) = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
diff --git a/test/test_optimizers.f90 b/test/test_optimizers.f90
index dc2cc03a..4ed1d927 100644
--- a/test/test_optimizers.f90
+++ b/test/test_optimizers.f90
@@ -8,7 +8,7 @@ program test_optimizers
   real, allocatable :: x(:), y(:)
   real, allocatable :: ypred(:)
   integer, parameter :: num_iterations = 1000
-  integer :: n, i
+  integer :: n
   logical :: ok = .true.
   logical :: converged = .false.
 