@@ -414,63 +414,99 @@ def calculatewdeltas(self, delta_w_matrix):
                self.kernels[k][i+j].updateweights()

        return dE_doutx
-
-    #TODO: Add channels for arrays
class MaxPoolingLayer:
+    """2D Max Pooling layer class
+    """
    def __init__(self, kernel_size, input_dim):
+        """Max pooling constructor
+
+        Args:
+            kernel_size (int): dimension of square kernel
+            input_dim (list): 3 element list (channels, x, y) of input shape to layer
+        """
        self.k_s = kernel_size
        self.i_d = input_dim

        out = np.floor(((input_dim[1] - kernel_size) / kernel_size) + 1)
+        # size of output feature maps
        self.output_size = [input_dim[0], int(out), int(out)]

    def calculate(self, input):
+        """Function for forward pass
+
+        Args:
+            input (ndarray): Numpy array of layer input (channels, x, y)
+
+        Returns:
+            ndarray: output array of max pooling
+        """
        self.max_loc = np.zeros(input.shape)
        feature_map = np.zeros(self.output_size)

        for i in range(input.shape[0]):
            # iterate over channels
            for j in range(self.output_size[1]):
                for k in range(self.output_size[2]):
+                    # Create sub array of input to pool over
                    sub_arr = input[i, (j*self.k_s):(j*self.k_s)+self.k_s, (k*self.k_s):(k*self.k_s)+self.k_s]
                    ind = np.unravel_index(np.argmax(sub_arr, axis=None), sub_arr.shape)
                    feature_map[i][j][k] = sub_arr[ind]
+                    # Saves max location in input for backprop
                    self.max_loc[i][(j*self.k_s)+ind[0]][(k*self.k_s)+ind[1]] = 1

-        """
-        out_dim = ((i_d - k_s)/k_s)+1
-        feature_map = np.array((out_dim, out_dim))
-        for i in range(out_dim):
-            for j in range(out_dim):
-                sub_arr = input[(i*k_s): (i*k_s)+k_s, (j*k_s): (j*k_s)+k_s]
-                ind = np.unravel_index(np.argmax(sub_arr, axis=None), sub_arr.shape)
-                feature_map[i][j] = sub_arr[ind]
-                max_loc[(i*k_s)+ind[0]][(j*k_s)+ind[1]] = 1
-        """
-
-        #save indexes better?
-
        return feature_map

    def calculatewdeltas(self, input):
+        """Backpropagation for max pooling
+
+        Args:
+            input (ndarray): numpy array of input for backprop (channels, x, y)
+
+        Returns:
+            ndarray: output of backpropagation of max pooling (channels, x, y)
+        """
        output = copy.deepcopy(self.max_loc)

        for i in range(input.shape[0]):
            for j in range(input.shape[1]):
                for k in range(input.shape[2]):
+                    # Multiply slice of output array by the update
                    output[i, (j*self.k_s):(j*self.k_s)+self.k_s, (k*self.k_s):(k*self.k_s)+self.k_s] *= input[i, j, k]

        return output

class FlattenLayer:
+    """Flatten layer
+    """
    def __init__(self, input_size):
+        """Constructor
+
+        Args:
+            input_size (list): list of input size (channels, x, y)
+        """
        self.i_s = input_size
        self.output_size = [self.i_s[0] * self.i_s[1] * self.i_s[2]]

    def calculate(self, input):
+        """Reshapes input to flatten
+
+        Args:
+            input (ndarray): numpy array to flatten (channels, x, y)
+
+        Returns:
+            ndarray: flattened 1D ndarray
+        """
        return np.reshape(input, -1)

    def calculatewdeltas(self, input):
+        """Reshape into original input shape
+
+        Args:
+            input (ndarray): 1D numpy array to reshape
+
+        Returns:
+            ndarray: reshaped array (channels, x, y)
+        """
        return np.reshape(input, self.i_s)

class NeuralNetwork: # initialize with the number of layers, number of neurons in each layer (vector), input size, activation (for each layer), the loss function, the learning rate, and a 3D matrix of weights (or else initialize randomly)
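
Below is a minimal usage sketch, not part of the commit, showing how the MaxPoolingLayer forward and backward passes behave on a single-channel 4x4 input; the module name cnn_layers is an assumption, import the classes from wherever this file lives in the project.

import numpy as np
from cnn_layers import MaxPoolingLayer  # hypothetical module name

x = np.arange(16, dtype=float).reshape(1, 4, 4)        # (channels, x, y)
pool = MaxPoolingLayer(kernel_size=2, input_dim=[1, 4, 4])

fmap = pool.calculate(x)            # shape (1, 2, 2): max of each non-overlapping 2x2 window
print(fmap)                         # [[[ 5.  7.] [13. 15.]]]

# Backprop routes each upstream delta to the saved argmax position of its window
delta = np.ones(pool.output_size)
dx = pool.calculatewdeltas(delta)   # shape (1, 4, 4), nonzero only where the maxima were
print(dx)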
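
Similarly, a small sketch (again assuming the hypothetical cnn_layers import) of the FlattenLayer round trip used between the pooling and dense layers.

import numpy as np
from cnn_layers import FlattenLayer  # hypothetical module name

flat = FlattenLayer(input_size=[1, 2, 2])

fmap = np.array([[[5., 7.], [13., 15.]]])
vec = flat.calculate(fmap)             # 1D vector of length 1*2*2 = 4
print(vec)                             # [ 5.  7. 13. 15.]

restored = flat.calculatewdeltas(vec)  # backprop reshapes the 1D delta back to (channels, x, y)
print(restored.shape)                  # (1, 2, 2)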