Course_2_Week_3_Project_2.py
"""
This is is a part of the DeepLearning.AI TensorFlow Developer Professional Certificate offered on Coursera.
All copyrights belong to them. I am sharing this work here to showcase the projects I have worked on
Course: Introduction to TensorFlow for Artificial Intelligence, Machine Learning, and Deep Learning
Week 3: Transfer Learning
Aim: Week 3 Test
"""
import os
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import Model
from os import getcwd
import zipfile
path_inception = f"{getcwd()}/Saved_models/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5"
# Import the inception model
from tensorflow.keras.applications.inception_v3 import InceptionV3
# Create an instance of the inception model from the local pre-trained weights
local_weights_file = path_inception
pre_trained_model = InceptionV3(input_shape=(150, 150, 3), include_top=False, weights=None)
pre_trained_model.load_weights(local_weights_file)
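# Alternative (a sketch, not part of the original assignment): if the local .h5
# file is missing, Keras can download the same ImageNet "notop" weights itself:
#   pre_trained_model = InceptionV3(input_shape=(150, 150, 3),
#                                   include_top=False,
#                                   weights='imagenet')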
# Make all the layers in the pre-trained model non-trainable
for layer in pre_trained_model.layers:
    layer.trainable = False
# Uncomment to print the model summary
# pre_trained_model.summary()
last_layer = pre_trained_model.get_layer('mixed3')
print('last layer output shape: ', last_layer.output_shape)
last_output = last_layer.output
# Expected Output:
# ('last layer output shape: ', (None, 7, 7, 768))
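# Optional sanity check (an addition, not in the original script): list the
# 'mixed' concatenation layers and their output shapes to see which cut points
# are available besides 'mixed3'.
for layer in pre_trained_model.layers:
    if layer.name.startswith('mixed'):
        print(layer.name, layer.output_shape)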
# Define a Callback class that stops training once accuracy reaches 97.0%
class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        if logs.get('acc', 0) > 0.97:
            print("\nReached 97% accuracy so cancelling training!")
            self.model.stop_training = True
from tensorflow.keras.optimizers import RMSprop
# Flatten the output layer to 1 dimension
x = layers.Flatten()(last_output)
# Add a fully connected layer with 1,024 hidden units and ReLU activation
x = layers.Dense(1024, activation = 'relu')(x)
# Add a dropout rate of 0.2
x = layers.Dropout(0.2)(x)
# Add a final sigmoid layer for classification
x = layers.Dense(1, activation='sigmoid')(x)
model = Model(pre_trained_model.input, x)
model.compile(optimizer=RMSprop(learning_rate=0.0001),
              loss='binary_crossentropy',
              metrics=['acc'])
model.summary()
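# Quick check (an addition, not in the original notebook): with the Inception
# base frozen, only the new Dense/Dropout head should contribute trainable weights.
trainable_params = sum(tf.keras.backend.count_params(w) for w in model.trainable_weights)
frozen_params = sum(tf.keras.backend.count_params(w) for w in model.non_trainable_weights)
print('Trainable parameters:', trainable_params)
print('Frozen parameters:', frozen_params)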
"""From horse_human dataset"""
local_zip = f"{getcwd()}/Dataset/horse-or-human.zip"
zip_ref=zipfile.ZipFile(local_zip, "r")
zip_ref.extractall("/Dataset/horse-or-human")
local_zip=f"{getcwd()}/Dataset/validation-horse-or-human.zip"
zip_ref=zipfile.ZipFile(local_zip, "r")
zip_ref.extractall("/Dataset/validation-horse-or-human")
zip_ref.close()
# Directory with our training horse pictures
train_horse_dir = os.path.join("/Dataset/horse-or-human/horses")
# Directory with our training human pictures
train_human_dir = os.path.join("/Dataset/horse-or-human/humans")
"""New addition: validation set"""
# Directory with our validation horse pictures
validation_horse_dir = os.path.join("/Dataset/validation-horse-or-human/horses")
# Directory with our validation human pictures
validation_human_dir = os.path.join("/Dataset/validation-horse-or-human/humans")
train_horse_names = os.listdir(train_horse_dir)
print(train_horse_names[:10])
train_human_names = os.listdir(train_human_dir)
print(train_human_names[:10])
validation_horse_names = os.listdir(validation_horse_dir)
print(validation_horse_names[:10])
validation_human_names = os.listdir(validation_human_dir)
print(validation_human_names[:10])
print("Total training horse images: ", len(os.listdir(train_horse_dir)))
print("Total training human images: ", len(os.listdir(train_human_dir)))
print("Total validation horse images: ", len(os.listdir(validation_horse_dir)))
print("Total validation human images: ", len(os.listdir(validation_human_dir)))
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
validation_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory("/Dataset/horse-or-human/", target_size=(150, 150), batch_size=128, class_mode="binary")
validation_generator = validation_datagen.flow_from_directory("/Dataset/validation-horse-or-human/", target_size=(150, 150), batch_size=32, class_mode="binary")
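# Sanity check (an addition): confirm the binary label mapping the generators
# inferred from the sub-directory names (e.g. {'horses': 0, 'humans': 1}).
print(train_generator.class_indices)
print(validation_generator.class_indices)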
# Run this and see how many epochs it takes before the callback
# fires and stops training at 97% accuracy.
callbacks = myCallback()
history = model.fit(train_generator, validation_data=validation_generator,
                    steps_per_epoch=50, epochs=5, validation_steps=50,
                    verbose=0, callbacks=[callbacks])
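# Optional (an addition, not in the original script): run one full pass over the
# validation generator to report the final validation loss and accuracy.
final_val_loss, final_val_acc = model.evaluate(validation_generator, verbose=0)
print('Final validation accuracy:', final_val_acc)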
import matplotlib.pyplot as plt
#-----------------------------------------------------------
# Retrieve the training and validation results recorded for
# each training epoch
#-----------------------------------------------------------
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))  # Get number of epochs
#------------------------------------------------
# Plot training and validation accuracy per epoch
#------------------------------------------------
plt.plot(epochs, acc)
plt.plot(epochs, val_acc)
plt.title('Training and validation accuracy')
plt.figure()
#------------------------------------------------
# Plot training and validation loss per epoch
#------------------------------------------------
plt.plot(epochs, loss)
plt.plot(epochs, val_loss)
plt.title('Training and validation loss')
plt.show()