
Commit 4887bcf

added docstrings

1 parent d1621f8 commit 4887bcf

File tree

2 files changed: +51 -20 lines changed

__pycache__/project2.cpython-37.pyc (1014 Bytes)

Binary file not shown.

project2.py: +51 -20
@@ -401,68 +401,99 @@ def calculatewdeltas(self, delta_w_matrix):
         #dE_doutx.shape == input_size
 
         return dE_doutx
-
-#TODO : Add channels for arrays
 class MaxPoolingLayer:
+    """2D max pooling layer class."""
     def __init__(self, kernel_size, input_dim):
+        """Max pooling constructor.
+
+        Args:
+            kernel_size (int): dimension of the square kernel
+            input_dim (list): 3-element list (channels, x, y) giving the input shape of the layer
+        """
         self.k_s = kernel_size
         self.i_d = input_dim
 
         out = np.floor(((input_dim[1] - kernel_size)/kernel_size)+1)
+        # size of the output feature maps
         self.output_size = [input_dim[0], int(out), int(out)]
 
     def calculate(self, input):
+        """Forward pass.
+
+        Args:
+            input (ndarray): numpy array of layer input (channels, x, y)
+
+        Returns:
+            ndarray: output array of max pooling
+        """
         self.max_loc = np.zeros(input.shape)
-        print('out size: ',self.output_size)
         feature_map = np.zeros(self.output_size)
 
         for i in range(input.shape[0]):
             #iterate over channels
             for j in range(self.output_size[1]):
                 for k in range(self.output_size[2]):
+                    # Create a sub-array of the input to pool over
                     sub_arr = input[i, (j*self.k_s): (j*self.k_s)+self.k_s, (k*self.k_s): (k*self.k_s)+self.k_s]
                     ind = np.unravel_index(np.argmax(sub_arr, axis=None), sub_arr.shape)
-                    #rint('ind: ', ind)
-                    #print('feature_map: ', feature_map)
                     feature_map[i][j][k] = sub_arr[ind]
+                    # Save the max location in the input for backprop
                     self.max_loc[i][(j*self.k_s)+ind[0]][(k*self.k_s)+ind[1]] = 1
 
-        """
-        out_dim = ((i_d - k_s)/k_s)+1
-        feature_map = np.array((out_dim, out_dim))
-        for i in range(out_dim):
-            for j in range(out_dim):
-                sub_arr = input[(i*k_s): (i*k_s)+k_s, (j*k_s): (j*k_s)+k_s]
-                ind = np.unravel_index(np.argmax(sub_arr, axis=None), sub_arr.shape)
-                feature_map[i][j] = sub_arr[ind]
-                max_loc[(i*k_s)+ind[0]][(j*k_s)+ind[1]] = 1
-        """
-
-        #save indexes better?
-
         return feature_map
 
     def calculatewdeltas(self, input):
+        """Backpropagation for max pooling.
+
+        Args:
+            input (ndarray): numpy array of input for backprop (channels, x, y)
+
+        Returns:
+            ndarray: output of the backpropagation of max pooling (channels, x, y)
+        """
         output = copy.deepcopy(self.max_loc)
 
         for i in range(input.shape[0]):
             for j in range(input.shape[1]):
                 for k in range(input.shape[2]):
+                    # Multiply the slice of the output array by the update
                     output[i, (j*self.k_s): (j*self.k_s)+self.k_s, (k*self.k_s): (k*self.k_s)+self.k_s] *= input[i, j, k]
 
         return output
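As a quick sanity check of the shape conventions described in the new docstrings, here is a minimal usage sketch (not part of the commit). It assumes project2.py is importable from the working directory and that the module imports numpy as np and copy, as the code above implies. With a 6x6 input and a 2x2 kernel, the output is floor((6 - 2) / 2) + 1 = 3 per spatial axis, since the stride equals the kernel size:

    import numpy as np
    from project2 import MaxPoolingLayer  # assumes project2.py is on the import path

    pool = MaxPoolingLayer(kernel_size=2, input_dim=[1, 6, 6])
    x = np.arange(36, dtype=float).reshape(1, 6, 6)  # illustrative input, (channels, x, y)

    fm = pool.calculate(x)
    print(fm.shape)            # (1, 3, 3)
    print(pool.max_loc.sum())  # 9.0: one winning position per pooling window

    # Backprop routes each upstream delta to its window's argmax position only.
    deltas = np.ones(pool.output_size)
    grad = pool.calculatewdeltas(deltas)
    print(grad.shape)          # (1, 6, 6), nonzero only where max_loc is 1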

 class FlattenLayer:
+    """Flatten layer."""
     def __init__(self, input_size):
-        print('FLATTEN INPUT SIZE', input_size)
+        """Constructor.
+
+        Args:
+            input_size (list): list of the input size (channels, x, y)
+        """
         self.i_s = input_size
-        #self.output_size = [self.i_s[0] * self.i_s[1] * self.i_s[2], 1]
         self.output_size = [self.i_s[0] * self.i_s[1] * self.i_s[2]]
 
     def calculate(self, input):
+        """Reshapes the input into a flat vector.
+
+        Args:
+            input (ndarray): numpy array to flatten (channels, x, y)
+
+        Returns:
+            ndarray: the flattened input as a 1D ndarray
+        """
         return np.reshape(input, -1)
 
     def calculatewdeltas(self, input):
+        """Reshapes back into the original input shape.
+
+        Args:
+            input (ndarray): 1D numpy array to reshape
+
+        Returns:
+            ndarray: the reshaped array (channels, x, y)
+        """
         return np.reshape(input, self.i_s)
 
 class NeuralNetwork: #initialize with the number of layers, number of neurons in each layer (vector), input size, activation (for each layer), the loss function, the learning rate and a 3d matrix of weights (or else initialize randomly)
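Similarly, a round-trip sketch for FlattenLayer (same import assumptions as the sketch above): calculate flattens a (channels, x, y) array into the 1D vector a dense layer expects, and calculatewdeltas inverts that reshape during backprop.

    import numpy as np
    from project2 import FlattenLayer  # assumes project2.py is on the import path

    flat = FlattenLayer(input_size=[2, 3, 3])
    x = np.arange(18, dtype=float).reshape(2, 3, 3)  # illustrative input

    v = flat.calculate(x)
    print(v.shape, flat.output_size)    # (18,) [18]

    restored = flat.calculatewdeltas(v)
    print(np.array_equal(restored, x))  # True: the original shape is recovered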
