-
Notifications
You must be signed in to change notification settings - Fork 0
/
Course_2_Week_4_Project_1.py
124 lines (98 loc) · 4.4 KB
/
Course_2_Week_4_Project_1.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
"""
This is a part of the DeepLearning.AI TensorFlow Developer Professional Certificate offered on Coursera.
All copyrights belong to them. I am sharing this work here to showcase the projects I have worked on
Course: Introduction to TensorFlow for Artificial Intelligence, Machine Learning, and Deep Learning
Week 1: Multiclass Classifications
Aim: Sign MNIST Dataset
"""
import csv
import os
import zipfile
from os import getcwd
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Unpack the Sign-MNIST archive into the local Dataset directory so the
# train/test CSVs referenced below exist at {getcwd()}/Dataset/*.csv.
# (The original extracted twice -- once per CSV -- into root-absolute
# "/Dataset/<name>.csv" directories and leaked the first ZipFile handle;
# one extraction of the whole archive into the Dataset folder suffices.)
local_zip = f"{getcwd()}/Dataset/archive.zip"
with zipfile.ZipFile(local_zip, "r") as zip_ref:
    zip_ref.extractall(f"{getcwd()}/Dataset")
def get_data(filename):
    """Parse a Sign-MNIST style CSV into image and label arrays.

    The file is expected to contain a header row followed by one row per
    example: column 0 is the integer class label, columns 1-784 are the
    pixel values of a 28x28 grayscale image in row-major order.

    Args:
        filename: Path to the CSV file.

    Returns:
        Tuple ``(images, labels)`` where ``images`` is a float array of
        shape ``(n, 28, 28)`` and ``labels`` is a float array of shape
        ``(n,)``.
    """
    with open(filename) as training_file:
        csv_reader = csv.reader(training_file, delimiter=",")
        next(csv_reader)  # skip the header row (replaces the first_line flag)
        temp_images = []
        temp_labels = []
        for row in csv_reader:
            temp_labels.append(row[0])
            # Split the 784 pixel strings into 28 rows of 28 columns.
            temp_images.append(np.array_split(row[1:], 28))
        images = np.array(temp_images).astype("float")
        labels = np.array(temp_labels).astype("float")
    return images, labels
# Locate the extracted train/test CSVs under the working directory.
path_sign_mnist_train = f"{getcwd()}/Dataset/sign_mnist_train.csv"
path_sign_mnist_test = f"{getcwd()}/Dataset/sign_mnist_test.csv"
# Parse each CSV into (n, 28, 28) float image arrays and (n,) label arrays.
training_images, training_labels = get_data(path_sign_mnist_train)
testing_images, testing_labels = get_data(path_sign_mnist_test)
# Keep these
print(training_images.shape)
print(training_labels.shape)
print(testing_images.shape)
print(testing_labels.shape)
# Append a trailing channel axis: (n, 28, 28) -> (n, 28, 28, 1), since
# Conv2D layers expect channels-last image batches.
training_images= np.expand_dims(training_images, axis=-1)
testing_images= np.expand_dims(testing_images, axis=-1)
# Training generator applies random augmentation (rotation, shifts, shear,
# zoom, horizontal flip) on top of 0-1 rescaling; the validation generator
# only rescales, so validation data is never augmented.
train_datagen = ImageDataGenerator(rescale=1 / 255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2,
shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')
validation_datagen = ImageDataGenerator(rescale=1 / 255)
# Keep These
print(training_images.shape)
print(testing_images.shape)
# CNN classifier: three conv/max-pool stages feeding a dense head with a
# 26-way softmax output (one unit per letter class).
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(tf.keras.layers.MaxPooling2D(2, 2))
model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(2, 2))
model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(2, 2))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(26, activation='softmax'))
# Mini-batch size used when drawing batches from the generators below.
batch_size = 32
# Compile Model.
# sparse_categorical_crossentropy matches the integer (non-one-hot) labels
# produced by get_data; 'acc' is read back via history.history['acc'] later.
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc'])
# Train on augmented batches from the training generator and validate on
# rescale-only batches from the test set. Step counts are integer ceilings
# so every sample is visited once per epoch (the original passed float
# divisions). fit_generator is deprecated in TF2 -- model.fit accepts
# generators directly. The hard-coded 32s now reuse batch_size.
history = model.fit(
    train_datagen.flow(training_images, training_labels, batch_size=batch_size),
    steps_per_epoch=(len(training_images) + batch_size - 1) // batch_size,
    epochs=50,
    validation_data=validation_datagen.flow(testing_images, testing_labels, batch_size=batch_size),
    validation_steps=(len(testing_images) + batch_size - 1) // batch_size)
# Evaluate on test images rescaled the same way the generators rescale them;
# the original fed raw 0-255 pixels to a model trained on 0-1 inputs.
model.evaluate(testing_images / 255.0, testing_labels)
import matplotlib.pyplot as plt

# Pull the per-epoch curves recorded by model.fit out of the History object.
history_metrics = history.history
train_acc = history_metrics['acc']
valid_acc = history_metrics['val_acc']
train_loss = history_metrics['loss']
valid_loss = history_metrics['val_loss']
epoch_range = range(len(train_acc))

# Accuracy curves (red = training, blue = validation).
plt.plot(epoch_range, train_acc, 'r', label='Training accuracy')
plt.plot(epoch_range, valid_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()

# Loss curves on a second figure.
plt.figure()
plt.plot(epoch_range, train_loss, 'r', label='Training Loss')
plt.plot(epoch_range, valid_loss, 'b', label='Validation Loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()