diff --git a/CMakeLists.txt b/CMakeLists.txt
index 490f7ff1..1a0a1be4 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -30,6 +30,8 @@ add_library(neural-fortran
   src/nf/nf_flatten_layer_submodule.f90
   src/nf/nf_input1d_layer.f90
   src/nf/nf_input1d_layer_submodule.f90
+  src/nf/nf_input2d_layer.f90
+  src/nf/nf_input2d_layer_submodule.f90
   src/nf/nf_input3d_layer.f90
   src/nf/nf_input3d_layer_submodule.f90
   src/nf/nf_layer_constructors.f90
diff --git a/LICENSE b/LICENSE
index fb555cb2..1edfa42f 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2018-2024 neural-fortran contributors
+Copyright (c) 2018-2025 neural-fortran contributors
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/README.md b/README.md
index 7e3a4445..905ff217 100644
--- a/README.md
+++ b/README.md
@@ -29,7 +29,7 @@ Read the paper [here](https://arxiv.org/abs/1902.06714).
 
 | Layer type | Constructor name | Supported input layers | Rank of output array | Forward pass | Backward pass |
 |------------|------------------|------------------------|----------------------|--------------|---------------|
-| Input | `input` | n/a | 1, 3 | n/a | n/a |
+| Input | `input` | n/a | 1, 2, 3 | n/a | n/a |
 | Dense (fully-connected) | `dense` | `input1d`, `flatten` | 1 | ✅ | ✅ |
 | Convolutional (2-d) | `conv2d` | `input3d`, `conv2d`, `maxpool2d`, `reshape` | 3 | ✅ | ✅(*) |
 | Max-pooling (2-d) | `maxpool2d` | `input3d`, `conv2d`, `maxpool2d`, `reshape` | 3 | ✅ | ✅ |
diff --git a/fpm.toml b/fpm.toml
index 5f68f8f6..cab0d5d5 100644
--- a/fpm.toml
+++ b/fpm.toml
@@ -1,6 +1,6 @@
 name = "neural-fortran"
-version = "0.18.0"
+version = "0.19.0"
 license = "MIT"
 author = "Milan Curcic"
-maintainer = "milancurcic@hey.com"
-copyright = "Copyright 2018-2024, neural-fortran contributors"
+maintainer = "mcurcic@miami.edu"
+copyright = "Copyright 2018-2025, neural-fortran contributors"
diff --git a/src/nf/nf_input2d_layer.f90 b/src/nf/nf_input2d_layer.f90
new file mode 100644
index 00000000..d97879ea
--- /dev/null
+++ b/src/nf/nf_input2d_layer.f90
@@ -0,0 +1,48 @@
+module nf_input2d_layer
+
+  !! This module provides the `input2d_layer` type.
+
+  use nf_base_layer, only: base_layer
+  implicit none
+
+  private
+  public :: input2d_layer
+
+  type, extends(base_layer) :: input2d_layer
+    real, allocatable :: output(:,:)
+  contains
+    procedure :: init
+    procedure :: set
+  end type input2d_layer
+
+  interface input2d_layer
+    pure module function input2d_layer_cons(output_shape) result(res)
+      !! Create a new instance of the 2-d input layer.
+      !! Only used internally by the `layer % init` method.
+      integer, intent(in) :: output_shape(2)
+        !! Shape of the input layer
+      type(input2d_layer) :: res
+        !! 2-d input layer instance
+    end function input2d_layer_cons
+  end interface input2d_layer
+
+  interface
+
+    module subroutine init(self, input_shape)
+      !! Only here to satisfy the language rules
+      !! about deferred methods of abstract types.
+      !! This method does nothing for this type and should not be called.
+      class(input2d_layer), intent(in out) :: self
+      integer, intent(in) :: input_shape(:)
+    end subroutine init
+
+    pure module subroutine set(self, values)
+      class(input2d_layer), intent(in out) :: self
+        !! Layer instance
+      real, intent(in) :: values(:,:)
+        !! Values to set
+    end subroutine set
+
+  end interface
+
+end module nf_input2d_layer
\ No newline at end of file
diff --git a/src/nf/nf_input2d_layer_submodule.f90 b/src/nf/nf_input2d_layer_submodule.f90
new file mode 100644
index 00000000..a7928df4
--- /dev/null
+++ b/src/nf/nf_input2d_layer_submodule.f90
@@ -0,0 +1,23 @@
+submodule(nf_input2d_layer) nf_input2d_layer_submodule
+  implicit none
+contains
+
+  pure module function input2d_layer_cons(output_shape) result(res)
+    integer, intent(in) :: output_shape(2)
+    type(input2d_layer) :: res
+    allocate(res % output(output_shape(1), output_shape(2)))
+    res % output = 0
+  end function input2d_layer_cons
+
+  module subroutine init(self, input_shape)
+    class(input2d_layer), intent(in out) :: self
+    integer, intent(in) :: input_shape(:)
+  end subroutine init
+
+  pure module subroutine set(self, values)
+    class(input2d_layer), intent(in out) :: self
+    real, intent(in) :: values(:,:)
+    self % output = values
+  end subroutine set
+
+end submodule nf_input2d_layer_submodule
\ No newline at end of file
diff --git a/src/nf/nf_layer.f90 b/src/nf/nf_layer.f90
index ca5e9606..33d1c773 100644
--- a/src/nf/nf_layer.f90
+++ b/src/nf/nf_layer.f90
@@ -35,12 +35,14 @@ module nf_layer
 
     ! Specific subroutines for different array ranks
     procedure, private :: backward_1d
+    procedure, private :: backward_2d
     procedure, private :: backward_3d
     procedure, private :: get_output_1d
+    procedure, private :: get_output_2d
     procedure, private :: get_output_3d
 
-    generic :: backward => backward_1d, backward_3d
-    generic :: get_output => get_output_1d, get_output_3d
+    generic :: backward => backward_1d, backward_2d, backward_3d
+    generic :: get_output => get_output_1d, get_output_2d, get_output_3d
 
   end type layer
 
@@ -59,6 +61,19 @@ pure module subroutine backward_1d(self, previous, gradient)
         !! Array of gradient values from the next layer
     end subroutine backward_1d
 
+    pure module subroutine backward_2d(self, previous, gradient)
+      !! Apply a backward pass on the layer.
+      !! This changes the internal state of the layer.
+      !! This is normally called internally by the `network % backward`
+      !! method.
+      class(layer), intent(in out) :: self
+        !! Layer instance
+      class(layer), intent(in) :: previous
+        !! Previous layer instance
+      real, intent(in) :: gradient(:, :)
+        !! Array of gradient values from the next layer
+    end subroutine backward_2d
+
     pure module subroutine backward_3d(self, previous, gradient)
       !! Apply a backward pass on the layer.
       !! This changes the internal state of the layer.
@@ -95,6 +110,14 @@ pure module subroutine get_output_1d(self, output)
         !! Output values from this layer
     end subroutine get_output_1d
 
+    pure module subroutine get_output_2d(self, output)
+      !! Returns the output values (activations) from this layer.
+      class(layer), intent(in) :: self
+        !! Layer instance
+      real, allocatable, intent(out) :: output(:,:)
+        !! Output values from this layer
+    end subroutine get_output_2d
+
     pure module subroutine get_output_3d(self, output)
       !! Returns the output values (activations) from a layer with a 3-d output
       !! (e.g. input3d, conv2d)
diff --git a/src/nf/nf_layer_constructors.f90 b/src/nf/nf_layer_constructors.f90
index 309be6e4..ea1c08df 100644
--- a/src/nf/nf_layer_constructors.f90
+++ b/src/nf/nf_layer_constructors.f90
@@ -35,10 +35,10 @@ module function input1d(layer_size) result(res)
         !! Resulting layer instance
     end function input1d
 
-    module function input3d(layer_shape) result(res)
-      !! 3-d input layer constructor.
+    module function input2d(dim1, dim2) result(res)
+      !! 2-d input layer constructor.
       !!
-      !! This layer is for inputting 3-d data to the network.
+      !! This layer is for inputting 2-d data to the network.
-      !! Currently, this layer must be followed by a conv2d layer.
+      !! No downstream layer types accept 2-d input yet
+      !! (see pull/197 and pull/199).
       !! An input layer must be the first layer in the network.
       !!
@@ -50,10 +50,29 @@ module function input3d(layer_shape) result(res)
       !! ```
       !! use nf, only :: input, layer
       !! type(layer) :: input_layer
-      !! input_layer = input([28, 28, 1])
+      !! input_layer = input(28, 28)
+      !! ```
+      integer, intent(in) :: dim1, dim2
+        !! First and second dimension sizes
+      type(layer) :: res
+        !! Resulting layer instance
+    end function input2d
+
+    module function input3d(dim1, dim2, dim3) result(res)
+      !! 3-d input layer constructor.
+      !!
+      !! This is a specific function that is available
+      !! under a generic name `input`.
+      !!
+      !! Example:
+      !!
+      !! ```
+      !! use nf, only :: input, layer
+      !! type(layer) :: input_layer
+      !! input_layer = input(28, 28, 1)
       !! ```
-      integer, intent(in) :: layer_shape(3)
-        !! Shape of the input layer
+      integer, intent(in) :: dim1, dim2, dim3
+        !! First, second and third dimension sizes
       type(layer) :: res
         !! Resulting layer instance
     end function input3d
diff --git a/src/nf/nf_layer_constructors_submodule.f90 b/src/nf/nf_layer_constructors_submodule.f90
index 234b20b1..4c5994ee 100644
--- a/src/nf/nf_layer_constructors_submodule.f90
+++ b/src/nf/nf_layer_constructors_submodule.f90
@@ -5,6 +5,7 @@
   use nf_dense_layer, only: dense_layer
   use nf_flatten_layer, only: flatten_layer
   use nf_input1d_layer, only: input1d_layer
+  use nf_input2d_layer, only: input2d_layer
   use nf_input3d_layer, only: input3d_layer
   use nf_maxpool2d_layer, only: maxpool2d_layer
   use nf_reshape_layer, only: reshape3d_layer
@@ -81,16 +82,28 @@ module function input1d(layer_size) result(res)
   end function input1d
 
-  module function input3d(layer_shape) result(res)
-    integer, intent(in) :: layer_shape(3)
+  module function input2d(dim1, dim2) result(res)
+    integer, intent(in) :: dim1, dim2
     type(layer) :: res
     res % name = 'input'
-    res % layer_shape = layer_shape
+    res % layer_shape = [dim1, dim2]
     res % input_layer_shape = [integer ::]
-    allocate(res % p, source=input3d_layer(layer_shape))
+    allocate(res % p, source=input2d_layer([dim1, dim2]))
+    res % initialized = .true.
+  end function input2d
+
+
+  module function input3d(dim1, dim2, dim3) result(res)
+    integer, intent(in) :: dim1, dim2, dim3
+    type(layer) :: res
+    res % name = 'input'
+    res % layer_shape = [dim1, dim2, dim3]
+    res % input_layer_shape = [integer ::]
+    allocate(res % p, source=input3d_layer([dim1, dim2, dim3]))
     res % initialized = .true.
   end function input3d
 
+
   module function maxpool2d(pool_size, stride) result(res)
     integer, intent(in) :: pool_size
     integer, intent(in), optional :: stride
@@ -119,6 +132,7 @@ module function maxpool2d(pool_size, stride) result(res)
   end function maxpool2d
 
+
   module function reshape(output_shape) result(res)
     integer, intent(in) :: output_shape(:)
     type(layer) :: res
diff --git a/src/nf/nf_layer_submodule.f90 b/src/nf/nf_layer_submodule.f90
index c672581a..41b9a2ce 100644
--- a/src/nf/nf_layer_submodule.f90
+++ b/src/nf/nf_layer_submodule.f90
@@ -5,6 +5,7 @@
   use nf_dense_layer, only: dense_layer
   use nf_flatten_layer, only: flatten_layer
   use nf_input1d_layer, only: input1d_layer
+  use nf_input2d_layer, only: input2d_layer
   use nf_input3d_layer, only: input3d_layer
   use nf_maxpool2d_layer, only: maxpool2d_layer
   use nf_reshape_layer, only: reshape3d_layer
@@ -51,6 +52,18 @@ pure module subroutine backward_1d(self, previous, gradient)
   end subroutine backward_1d
 
+  pure module subroutine backward_2d(self, previous, gradient)
+    implicit none
+    class(layer), intent(in out) :: self
+    class(layer), intent(in) :: previous
+    real, intent(in) :: gradient(:,:)
+
+    ! Backward pass from a 2-d layer downstream is not yet implemented
+    ! for any layer type (see pull/197 and pull/199).
+  end subroutine backward_2d
+
+
   pure module subroutine backward_3d(self, previous, gradient)
     implicit none
     class(layer), intent(in out) :: self
@@ -205,6 +218,23 @@ pure module subroutine get_output_1d(self, output)
   end subroutine get_output_1d
 
+  pure module subroutine get_output_2d(self, output)
+    implicit none
+    class(layer), intent(in) :: self
+    real, allocatable, intent(out) :: output(:,:)
+
+    select type(this_layer => self % p)
+
+      type is(input2d_layer)
+        allocate(output, source=this_layer % output)
+      class default
+        error stop '2-d output can only be read from an input2d layer.'
+
+    end select
+
+  end subroutine get_output_2d
+
+
   pure module subroutine get_output_3d(self, output)
     implicit none
     class(layer), intent(in) :: self
@@ -280,6 +310,8 @@ elemental module function get_num_params(self) result(num_params)
     select type (this_layer => self % p)
       type is (input1d_layer)
         num_params = 0
+      type is (input2d_layer)
+        num_params = 0
       type is (input3d_layer)
         num_params = 0
       type is (dense_layer)
@@ -305,6 +337,8 @@ module function get_params(self) result(params)
     select type (this_layer => self % p)
       type is (input1d_layer)
         ! No parameters to get.
+      type is (input2d_layer)
+        ! No parameters to get.
       type is (input3d_layer)
         ! No parameters to get.
       type is (dense_layer)
@@ -330,6 +364,8 @@ module function get_gradients(self) result(gradients)
     select type (this_layer => self % p)
       type is (input1d_layer)
         ! No gradients to get.
+      type is (input2d_layer)
+        ! No gradients to get.
       type is (input3d_layer)
         ! No gradients to get.
       type is (dense_layer)
@@ -373,6 +409,11 @@ module subroutine set_params(self, params)
         write(stderr, '(a)') 'Warning: calling set_params() ' &
           // 'on a zero-parameter layer; nothing to do.'
 
+      type is (input2d_layer)
+        ! No parameters to set.
+        write(stderr, '(a)') 'Warning: calling set_params() ' &
+          // 'on a zero-parameter layer; nothing to do.'
+
       type is (input3d_layer)
         ! No parameters to set.
         write(stderr, '(a)') 'Warning: calling set_params() ' &
diff --git a/src/nf/nf_network.f90 b/src/nf/nf_network.f90
index 8afa8884..fa7ea4eb 100644
--- a/src/nf/nf_network.f90
+++ b/src/nf/nf_network.f90
@@ -31,15 +31,18 @@ module nf_network
 
     procedure, private :: evaluate_batch_1d
     procedure, private :: forward_1d
+    procedure, private :: forward_2d
     procedure, private :: forward_3d
     procedure, private :: predict_1d
+    procedure, private :: predict_2d
     procedure, private :: predict_3d
     procedure, private :: predict_batch_1d
     procedure, private :: predict_batch_3d
 
     generic :: evaluate => evaluate_batch_1d
-    generic :: forward => forward_1d, forward_3d
-    generic :: predict => predict_1d, predict_3d, predict_batch_1d, predict_batch_3d
+    generic :: forward => forward_1d, forward_2d, forward_3d
+    generic :: predict => predict_1d, predict_2d, predict_3d
+    generic :: predict_batch => predict_batch_1d, predict_batch_3d
 
   end type network
 
@@ -91,6 +94,20 @@ module subroutine forward_1d(self, input)
         !! 1-d input data
     end subroutine forward_1d
 
+    module subroutine forward_2d(self, input)
+      !! Apply a forward pass through the network.
+      !!
+      !! This changes the state of layers on the network.
+      !! Typically used only internally from the `train` method,
+      !! but can be invoked by the user when creating custom optimizers.
+      !!
+      !! This specific subroutine is for 2-d input data.
+      class(network), intent(in out) :: self
+        !! Network instance
+      real, intent(in) :: input(:,:)
+        !! 2-d input data
+    end subroutine forward_2d
+
     module subroutine forward_3d(self, input)
       !! Apply a forward pass through the network.
       !!
@@ -119,6 +136,16 @@ module function predict_1d(self, input) result(res)
         !! Output of the network
     end function predict_1d
 
+    module function predict_2d(self, input) result(res)
+      !! Return the output of the network given the input 2-d array.
+      class(network), intent(in out) :: self
+        !! Network instance
+      real, intent(in) :: input(:,:)
+        !! Input data
+      real, allocatable :: res(:)
+        !! Output of the network
+    end function predict_2d
+
     module function predict_3d(self, input) result(res)
       !! Return the output of the network given the input 3-d array.
       class(network), intent(in out) :: self
@@ -128,7 +155,9 @@ module function predict_3d(self, input) result(res)
       real, allocatable :: res(:)
         !! Output of the network
     end function predict_3d
+
+  end interface output
+
+  interface output_batch
 
     module function predict_batch_1d(self, input) result(res)
       !! Return the output of the network given an input batch of 1-d data.
       class(network), intent(in out) :: self
@@ -148,8 +177,7 @@ module function predict_batch_3d(self, input) result(res)
       real, allocatable :: res(:,:)
         !! Output of the network; the last dimension is the batch
     end function predict_batch_3d
-
-  end interface output
+  end interface output_batch
 
   interface
 
diff --git a/src/nf/nf_network_submodule.f90 b/src/nf/nf_network_submodule.f90
index 140c9226..506c3295 100644
--- a/src/nf/nf_network_submodule.f90
+++ b/src/nf/nf_network_submodule.f90
@@ -4,6 +4,7 @@
   use nf_dense_layer, only: dense_layer
   use nf_flatten_layer, only: flatten_layer
   use nf_input1d_layer, only: input1d_layer
+  use nf_input2d_layer, only: input2d_layer
   use nf_input3d_layer, only: input3d_layer
   use nf_maxpool2d_layer, only: maxpool2d_layer
   use nf_reshape_layer, only: reshape3d_layer
@@ -160,7 +161,7 @@ module function evaluate_batch_1d(self, input_data, output_data, metric) result(
     integer :: i, n
     real, allocatable :: output(:,:)
 
-    output = self % predict(input_data)
+    output = self % predict_batch(input_data)
 
     n = 1
     if (present(metric)) n = n + 1
@@ -197,6 +198,23 @@ module subroutine forward_1d(self, input)
   end subroutine forward_1d
 
+  module subroutine forward_2d(self, input)
+    class(network), intent(in out) :: self
+    real, intent(in) :: input(:,:)
+    integer :: n
+
+    ! Set the input array into the input layer
+    select type(input_layer => self % layers(1) % p); type is(input2d_layer)
+      call input_layer % set(input)
+    end select
+
+    do n = 2, size(self % layers)
+      call self % layers(n) % forward(self % layers(n - 1))
+    end do
+
+  end subroutine forward_2d
+
+
   module subroutine forward_3d(self, input)
     class(network), intent(in out) :: self
     real, intent(in) :: input(:,:,:)
@@ -236,6 +254,24 @@ module function predict_1d(self, input) result(res)
   end function predict_1d
 
+  module function predict_2d(self, input) result(res)
+    class(network), intent(in out) :: self
+    real, intent(in) :: input(:,:)
+    real, allocatable :: res(:)
+    integer :: num_layers
+
+    num_layers = size(self % layers)
+
+    call self % forward(input)
+
+    select type(output_layer => self % layers(num_layers) % p)
+      type is(dense_layer)
+        res = output_layer % output
+    end select
+
+  end function predict_2d
+
+
   module function predict_3d(self, input) result(res)
     class(network), intent(in out) :: self
     real, intent(in) :: input(:,:,:)
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index bfd3538a..35954894 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -1,5 +1,6 @@
 foreach(execid
   input1d_layer
+  input2d_layer
   input3d_layer
   parametric_activation
   dense_layer
diff --git a/test/test_conv2d_layer.f90 b/test/test_conv2d_layer.f90
index 5840063b..10a14c5e 100644
--- a/test/test_conv2d_layer.f90
+++ b/test/test_conv2d_layer.f90
@@ -29,7 +29,7 @@ program test_conv2d_layer
     write(stderr, '(a)') 'conv2d layer defaults to relu activation.. failed'
   end if
 
-  input_layer = input([3, 32, 32])
+  input_layer = input(3, 32, 32)
   call conv_layer % init(input_layer)
 
   if (.not. conv_layer % initialized) then
@@ -51,7 +51,7 @@ program test_conv2d_layer
   allocate(sample_input(1, 3, 3))
   sample_input = 0
 
-  input_layer = input([1, 3, 3])
+  input_layer = input(1, 3, 3)
   conv_layer = conv2d(filters, kernel_size)
   call conv_layer % init(input_layer)
diff --git a/test/test_conv2d_network.f90 b/test/test_conv2d_network.f90
index 47c9a819..42d868df 100644
--- a/test/test_conv2d_network.f90
+++ b/test/test_conv2d_network.f90
@@ -11,7 +11,7 @@ program test_conv2d_network
 
   ! 3-layer convolutional network
   net = network([ &
-    input([3, 32, 32]), &
+    input(3, 32, 32), &
     conv2d(filters=16, kernel_size=3), &
     conv2d(filters=32, kernel_size=3) &
   ])
@@ -48,7 +48,7 @@ program test_conv2d_network
   call random_number(sample_input)
 
   cnn = network([ &
-    input(shape(sample_input)), &
+    input(1, 5, 5), &
     conv2d(filters=1, kernel_size=3), &
     conv2d(filters=1, kernel_size=3), &
     dense(1) &
   ])
@@ -84,7 +84,7 @@ program test_conv2d_network
   y = [0.1234567]
 
   cnn = network([ &
-    input(shape(x)), &
+    input(1, 8, 8), &
     conv2d(filters=1, kernel_size=3), &
     maxpool2d(pool_size=2), &
     conv2d(filters=1, kernel_size=3), &
@@ -119,7 +119,7 @@ program test_conv2d_network
   y = [0.12345, 0.23456, 0.34567, 0.45678, 0.56789, 0.67890, 0.78901, 0.89012, 0.90123]
 
   cnn = network([ &
-    input(shape(x)), &
+    input(1, 12, 12), &
     conv2d(filters=1, kernel_size=3), & ! 1x12x12 input, 1x10x10 output
     maxpool2d(pool_size=2), &           ! 1x10x10 input, 1x5x5 output
     conv2d(filters=1, kernel_size=3), & ! 1x5x5 input, 1x3x3 output
diff --git a/test/test_flatten_layer.f90 b/test/test_flatten_layer.f90
index 70641c9c..e4e1d6e4 100644
--- a/test/test_flatten_layer.f90
+++ b/test/test_flatten_layer.f90
@@ -25,7 +25,7 @@ program test_flatten_layer
     write(stderr, '(a)') 'flatten layer is not initialized yet.. failed'
   end if
 
-  input_layer = input([1, 2, 2])
+  input_layer = input(1, 2, 2)
   call test_layer % init(input_layer)
 
   if (.not. test_layer % initialized) then
@@ -68,7 +68,7 @@ program test_flatten_layer
   end if
 
   net = network([ &
-    input([1, 28, 28]), &
+    input(1, 28, 28), &
     flatten(), &
     dense(10) &
   ])
diff --git a/test/test_get_set_network_params.f90 b/test/test_get_set_network_params.f90
index 3e285f3c..71963a1c 100644
--- a/test/test_get_set_network_params.f90
+++ b/test/test_get_set_network_params.f90
@@ -9,7 +9,7 @@ program test_get_set_network_params
 
   ! First test get_num_params()
   net = network([ &
-    input([3, 5, 5]), & ! 5 x 5 image with 3 channels
+    input(3, 5, 5), & ! 5 x 5 image with 3 channels
     conv2d(filters=2, kernel_size=3), & ! kernel shape [2, 3, 3, 3], output shape [2, 3, 3], 56 parameters total
     flatten(), &
     dense(4) & ! weights shape [72], biases shape [4], 76 parameters total
@@ -45,7 +45,7 @@ program test_get_set_network_params
 
   ! Finally, test set_params() and get_params() for a conv2d layer
   net = network([ &
-    input([1, 3, 3]), &
+    input(1, 3, 3), &
     conv2d(filters=1, kernel_size=3) &
   ])
diff --git a/test/test_input2d_layer.f90 b/test/test_input2d_layer.f90
new file mode 100644
index 00000000..81a40621
--- /dev/null
+++ b/test/test_input2d_layer.f90
@@ -0,0 +1,49 @@
+program test_input2d_layer
+
+  use iso_fortran_env, only: stderr => error_unit
+  use nf, only: input, layer
+  use nf_input2d_layer, only: input2d_layer
+
+  implicit none
+
+  type(layer) :: test_layer
+  real, allocatable :: output(:,:)
+  logical :: ok = .true.
+
+  test_layer = input(3, 32)
+
+  if (.not. test_layer % name == 'input') then
+    ok = .false.
+    write(stderr, '(a)') 'input2d layer has its name set correctly.. failed'
+  end if
+
+  if (.not. test_layer % initialized) then
+    ok = .false.
+    write(stderr, '(a)') 'input2d layer should be marked as initialized.. failed'
+  end if
+
+  if (.not. all(test_layer % layer_shape == [3, 32])) then
+    ok = .false.
+    write(stderr, '(a)') 'input2d layer is created with requested size.. failed'
+  end if
+
+  if (.not. size(test_layer % input_layer_shape) == 0) then
+    ok = .false.
+    write(stderr, '(a)') 'input2d layer has no input layer shape.. failed'
+  end if
+
+  call test_layer % get_output(output)
+
+  if (.not. all(output == 0)) then
+    ok = .false.
+    write(stderr, '(a)') 'input2d layer values are all initialized to 0.. failed'
+  end if
+
+  if (ok) then
+    print '(a)', 'test_input2d_layer: All tests passed.'
+  else
+    write(stderr, '(a)') 'test_input2d_layer: One or more tests failed.'
+    stop 1
+  end if
+
+end program test_input2d_layer
diff --git a/test/test_input3d_layer.f90 b/test/test_input3d_layer.f90
index 748acbfb..0711220e 100644
--- a/test/test_input3d_layer.f90
+++ b/test/test_input3d_layer.f90
@@ -10,7 +10,7 @@ program test_input3d_layer
   real, allocatable :: output(:,:,:)
   logical :: ok = .true.
 
-  test_layer = input([3, 32, 32])
+  test_layer = input(3, 32, 32)
 
   if (.not. test_layer % name == 'input') then
     ok = .false.
diff --git a/test/test_insert_flatten.f90 b/test/test_insert_flatten.f90
index 22a135d8..c6b64f80 100644
--- a/test/test_insert_flatten.f90
+++ b/test/test_insert_flatten.f90
@@ -9,7 +9,7 @@ program test_insert_flatten
   logical :: ok = .true.
 
   net = network([ &
-    input([3, 32, 32]), &
+    input(3, 32, 32), &
     dense(10) &
   ])
 
@@ -19,7 +19,7 @@ program test_insert_flatten
   end if
 
   net = network([ &
-    input([3, 32, 32]), &
+    input(3, 32, 32), &
     conv2d(filters=1, kernel_size=3), &
     dense(10) &
   ])
@@ -32,7 +32,7 @@ program test_insert_flatten
   end if
 
   net = network([ &
-    input([3, 32, 32]), &
+    input(3, 32, 32), &
     conv2d(filters=1, kernel_size=3), &
     maxpool2d(pool_size=2, stride=2), &
     dense(10) &
   ])
diff --git a/test/test_maxpool2d_layer.f90 b/test/test_maxpool2d_layer.f90
index 8ecad96f..5983a217 100644
--- a/test/test_maxpool2d_layer.f90
+++ b/test/test_maxpool2d_layer.f90
@@ -28,7 +28,7 @@ program test_maxpool2d_layer
     write(stderr, '(a)') 'maxpool2d layer should not be marked as initialized yet.. failed'
   end if
 
-  input_layer = input(input_shape)
+  input_layer = input(channels, width, width)
   call maxpool_layer % init(input_layer)
 
   if (.not. maxpool_layer % initialized) then
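A minimal usage sketch of the 2-d input API this patch adds (not part of the patch itself). It relies only on what the patch and its tests show: the generic `input` constructor, the rank-2 `get_output` binding, the public `% p` and `% layer_shape` components used elsewhere in the library, and the `select type` idiom from `forward_2d`. Since no downstream layer type accepts 2-d input yet (pending pull/197 and pull/199), it exercises the input layer alone:

```
program input2d_usage
  use nf, only: input, layer
  use nf_input2d_layer, only: input2d_layer
  implicit none

  type(layer) :: input_layer
  real :: x(3, 32)
  real, allocatable :: y(:,:)

  ! The generic `input` now accepts two dimension sizes and
  ! constructs a layer whose rank-2 output array is zeroed.
  input_layer = input(3, 32)
  print *, input_layer % layer_shape  ! 3 32

  ! Values are set on the concrete layer type; `network % forward`
  ! does the same internally via this select type pattern.
  call random_number(x)
  select type (p => input_layer % p); type is (input2d_layer)
    call p % set(x)
  end select

  ! The new rank-2 `get_output` binding reads the values back.
  call input_layer % get_output(y)
  print *, all(y == x)  ! T
end program input2d_usage
```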