Train a Tensorflow NN for Upgoat vs Downgoat using MobileNetV2 Feature Extractions¶
Import libraries¶
In [1]:
import os
import glob
import shutil
import random
import numpy as np
import cv2
import matplotlib.pyplot as plt
from PIL import Image
import tensorflow as tf
Define Input image data¶
In [2]:
train_dir = r"goats\data\train"
validation_dir = r"goats\data\valid"
test_dir = r"goats\data\test"
BATCH_SIZE = 32
IMG_SIZE = (128, 128)
train_dataset = tf.keras.utils.image_dataset_from_directory(train_dir,
shuffle=True,
batch_size=BATCH_SIZE,
image_size=IMG_SIZE)
validation_dataset = tf.keras.utils.image_dataset_from_directory(validation_dir,
shuffle=True,
batch_size=BATCH_SIZE,
image_size=IMG_SIZE)
test_dataset = tf.keras.utils.image_dataset_from_directory(test_dir,
shuffle=True,
batch_size=BATCH_SIZE,
image_size=IMG_SIZE)
Found 2780 files belonging to 2 classes. Found 520 files belonging to 2 classes. Found 173 files belonging to 2 classes.
Display training images¶
In [3]:
class_names = train_dataset.class_names
plt.figure(figsize=(10, 10))
for images, labels in train_dataset.take(1):
for i in range(25):
ax = plt.subplot(5, 5, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(class_names[labels[i]])
plt.axis("off")
Configure dataset flow and MobileNetV2 Pre processing¶
In [4]:
AUTOTUNE = tf.data.AUTOTUNE
train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)
validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE)
test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE)
In [5]:
# Rescale pixel values
preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input
rescale = tf.keras.layers.Rescaling(1./127.5, offset=-1)
Create base model using MobileNetV2¶
In [6]:
# Create the base model from the pre-trained model MobileNet V2
IMG_SHAPE = IMG_SIZE + (3,) # 3 channels
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
include_top=False,
weights='imagenet')
MobileNetV2 Feature Extraction layer¶
In [7]:
# Feature Extraction converts images to block of features of size:
image_batch, label_batch = next(iter(train_dataset))
feature_batch = base_model(image_batch)
print(feature_batch.shape)
(32, 4, 4, 1280)
Global Average Pooling 2D layer¶
In [8]:
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
feature_batch_average = global_average_layer(feature_batch)
print(feature_batch_average.shape)
(32, 1280)
Final Prediction Layer (Sigmoid for Binary outputs)¶
In [9]:
prediction_layer = tf.keras.layers.Dense(1, activation='sigmoid')
prediction_batch = prediction_layer(feature_batch_average)
print(prediction_batch.shape)
(32, 1)
Compile Model and Configurations¶
In [10]:
inputs = tf.keras.Input(shape=(128, 128, 3))
x = preprocess_input(inputs)
x = base_model(x, training=False)
x = global_average_layer(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = prediction_layer(x)
model = tf.keras.Model(inputs, outputs)
In [11]:
base_model.trainable = True
In [12]:
# Let's take a look to see how many layers are in the base model
print("Number of layers in the base model: ", len(base_model.layers))
# Fine-tune from this layer onwards
fine_tune_at = 100
# Freeze all the layers before the `fine_tune_at` layer
for layer in base_model.layers[:fine_tune_at]:
layer.trainable = False
Number of layers in the base model: 154
In [13]:
base_learning_rate = 0.00001
model.compile(loss=tf.keras.losses.BinaryCrossentropy(),
optimizer = tf.keras.optimizers.RMSprop(learning_rate=base_learning_rate),
metrics=[tf.keras.metrics.BinaryAccuracy(threshold=0.5, name='accuracy'),
tf.keras.metrics.Precision(thresholds=0.5, name='precision'),
tf.keras.metrics.Precision(thresholds=0.5, name='recall')])
In [14]:
model.summary()
Model: "model" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_2 (InputLayer) [(None, 128, 128, 3)] 0 tf.math.truediv (TFOpLambd (None, 128, 128, 3) 0 a) tf.math.subtract (TFOpLamb (None, 128, 128, 3) 0 da) mobilenetv2_1.00_128 (Func (None, 4, 4, 1280) 2257984 tional) global_average_pooling2d ( (None, 1280) 0 GlobalAveragePooling2D) dropout (Dropout) (None, 1280) 0 dense (Dense) (None, 1) 1281 ================================================================= Total params: 2259265 (8.62 MB) Trainable params: 1862721 (7.11 MB) Non-trainable params: 396544 (1.51 MB) _________________________________________________________________
Begin Trainning:¶
In [15]:
total_epochs = 20
history = model.fit(train_dataset,
epochs=total_epochs,
initial_epoch=0,
validation_data=validation_dataset)
Epoch 1/20 87/87 [==============================] - 20s 187ms/step - loss: 0.2174 - accuracy: 0.9119 - precision: 0.9347 - recall: 0.9347 - val_loss: 0.0990 - val_accuracy: 0.9577 - val_precision: 0.9685 - val_recall: 0.9685 Epoch 2/20 87/87 [==============================] - 16s 188ms/step - loss: 0.0723 - accuracy: 0.9737 - precision: 0.9761 - recall: 0.9761 - val_loss: 0.0775 - val_accuracy: 0.9635 - val_precision: 0.9879 - val_recall: 0.9879 Epoch 3/20 87/87 [==============================] - 17s 192ms/step - loss: 0.0451 - accuracy: 0.9824 - precision: 0.9848 - recall: 0.9848 - val_loss: 0.0551 - val_accuracy: 0.9769 - val_precision: 0.9697 - val_recall: 0.9697 Epoch 4/20 87/87 [==============================] - 17s 196ms/step - loss: 0.0259 - accuracy: 0.9932 - precision: 0.9928 - recall: 0.9928 - val_loss: 0.0828 - val_accuracy: 0.9712 - val_precision: 0.9520 - val_recall: 0.9520 Epoch 5/20 87/87 [==============================] - 16s 187ms/step - loss: 0.0160 - accuracy: 0.9960 - precision: 0.9964 - recall: 0.9964 - val_loss: 0.0378 - val_accuracy: 0.9846 - val_precision: 0.9884 - val_recall: 0.9884 Epoch 6/20 87/87 [==============================] - 17s 195ms/step - loss: 0.0119 - accuracy: 0.9964 - precision: 0.9964 - recall: 0.9964 - val_loss: 0.0379 - val_accuracy: 0.9808 - val_precision: 0.9808 - val_recall: 0.9808 Epoch 7/20 87/87 [==============================] - 17s 194ms/step - loss: 0.0075 - accuracy: 0.9975 - precision: 0.9978 - recall: 0.9978 - val_loss: 0.0277 - val_accuracy: 0.9865 - val_precision: 0.9847 - val_recall: 0.9847 Epoch 8/20 87/87 [==============================] - 16s 186ms/step - loss: 0.0036 - accuracy: 0.9996 - precision: 0.9993 - recall: 0.9993 - val_loss: 0.0358 - val_accuracy: 0.9865 - val_precision: 0.9961 - val_recall: 0.9961 Epoch 9/20 87/87 [==============================] - 16s 189ms/step - loss: 0.0029 - accuracy: 0.9989 - precision: 0.9993 - recall: 0.9993 - val_loss: 0.0428 - val_accuracy: 0.9865 - val_precision: 0.9961 - val_recall: 0.9961 Epoch 10/20 87/87 [==============================] - 17s 195ms/step - loss: 0.0018 - accuracy: 0.9996 - precision: 1.0000 - recall: 1.0000 - val_loss: 0.0278 - val_accuracy: 0.9846 - val_precision: 0.9884 - val_recall: 0.9884 Epoch 11/20 87/87 [==============================] - 16s 189ms/step - loss: 0.0017 - accuracy: 0.9996 - precision: 0.9993 - recall: 0.9993 - val_loss: 0.0275 - val_accuracy: 0.9865 - val_precision: 0.9922 - val_recall: 0.9922 Epoch 12/20 87/87 [==============================] - 17s 193ms/step - loss: 7.5902e-04 - accuracy: 1.0000 - precision: 1.0000 - recall: 1.0000 - val_loss: 0.0553 - val_accuracy: 0.9865 - val_precision: 1.0000 - val_recall: 1.0000 Epoch 13/20 87/87 [==============================] - 20s 226ms/step - loss: 4.7087e-04 - accuracy: 1.0000 - precision: 1.0000 - recall: 1.0000 - val_loss: 0.0245 - val_accuracy: 0.9865 - val_precision: 0.9884 - val_recall: 0.9884 Epoch 14/20 87/87 [==============================] - 17s 194ms/step - loss: 9.1501e-04 - accuracy: 0.9996 - precision: 0.9993 - recall: 0.9993 - val_loss: 0.0263 - val_accuracy: 0.9885 - val_precision: 0.9961 - val_recall: 0.9961 Epoch 15/20 87/87 [==============================] - 18s 207ms/step - loss: 6.7594e-04 - accuracy: 1.0000 - precision: 1.0000 - recall: 1.0000 - val_loss: 0.0321 - val_accuracy: 0.9885 - val_precision: 0.9961 - val_recall: 0.9961 Epoch 16/20 87/87 [==============================] - 18s 210ms/step - loss: 8.0619e-04 - accuracy: 0.9996 - precision: 0.9993 - recall: 0.9993 - val_loss: 0.0400 - val_accuracy: 0.9885 - val_precision: 1.0000 - val_recall: 1.0000 Epoch 17/20 87/87 [==============================] - 17s 190ms/step - loss: 1.3247e-04 - accuracy: 1.0000 - precision: 1.0000 - recall: 1.0000 - val_loss: 0.0265 - val_accuracy: 0.9904 - val_precision: 0.9961 - val_recall: 0.9961 Epoch 18/20 87/87 [==============================] - 17s 193ms/step - loss: 2.8135e-04 - accuracy: 1.0000 - precision: 1.0000 - recall: 1.0000 - val_loss: 0.0310 - val_accuracy: 0.9885 - val_precision: 0.9922 - val_recall: 0.9922 Epoch 19/20 87/87 [==============================] - 18s 207ms/step - loss: 1.3663e-04 - accuracy: 1.0000 - precision: 1.0000 - recall: 1.0000 - val_loss: 0.0302 - val_accuracy: 0.9904 - val_precision: 0.9961 - val_recall: 0.9961 Epoch 20/20 87/87 [==============================] - 17s 189ms/step - loss: 9.4391e-05 - accuracy: 1.0000 - precision: 1.0000 - recall: 1.0000 - val_loss: 0.0270 - val_accuracy: 0.9904 - val_precision: 0.9961 - val_recall: 0.9961
Assess Model Performance¶
In [16]:
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
prec = history.history['precision']
val_prec = history.history['val_precision']
recall = history.history['recall']
val_recall = history.history['val_recall']
In [17]:
plt.figure(figsize=(8, 8))
plt.subplot(4, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')
plt.subplot(4, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,1.0])
plt.title('Training and Validation Loss')
plt.subplot(4, 1, 3)
plt.plot(prec, label='Training Precision')
plt.plot(val_prec, label='Validation Precision')
plt.legend(loc='upper right')
plt.ylabel('Precision')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Precision')
plt.subplot(4, 1, 4)
plt.plot(recall, label='Training Recall')
plt.plot(val_recall, label='Validation Recall')
plt.legend(loc='upper right')
plt.ylabel('Recall')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Recall')
plt.xlabel('epoch')
plt.subplots_adjust(hspace=0.5)
plt.show()
In [18]:
loss_train, accuracy_train, precision_train, recall_train = model.evaluate(train_dataset)
print(f'Train Loss : {loss_train:0.2f}, Accuracy: {accuracy_train:0.2f}, Precision: {precision_train:0.2f}, Recall: {recall_train:0.2f}.')
87/87 [==============================] - 10s 117ms/step - loss: 2.7762e-05 - accuracy: 1.0000 - precision: 1.0000 - recall: 1.0000 Train Loss : 0.00, Accuracy: 1.00, Precision: 1.00, Recall: 1.00.
In [19]:
loss_valid, accuracy_valid, precision_valid, recall_valid = model.evaluate(validation_dataset)
print(f'Validation Loss : {loss_valid:0.2f}, Accuracy: {accuracy_valid:0.2f}, Precision: {precision_valid:0.2f}, Recall: {recall_valid:0.2f}.')
17/17 [==============================] - 2s 108ms/step - loss: 0.0270 - accuracy: 0.9904 - precision: 0.9961 - recall: 0.9961 Validation Loss : 0.03, Accuracy: 0.99, Precision: 1.00, Recall: 1.00.
In [20]:
loss_test, accuracy_test, precision_test, recall_test = model.evaluate(test_dataset)
print(f'Test Loss : {loss_test:0.2f}, Accuracy: {accuracy_test:0.2f}, Precision: {precision_test:0.2f}, Recall: {recall_test:0.2f}.')
6/6 [==============================] - 1s 102ms/step - loss: 0.0188 - accuracy: 0.9942 - precision: 0.9885 - recall: 0.9885 Test Loss : 0.02, Accuracy: 0.99, Precision: 0.99, Recall: 0.99.
Access Model Performance on Test Images¶
In [21]:
# Retrieve a batch of images from the test set
image_batch, label_batch = test_dataset.as_numpy_iterator().next()
scores = model.predict_on_batch(image_batch).flatten()
predictions = tf.where(scores < 0.5, 0, 1)
#print('Predictions:\n', predictions.numpy())
print('Probabilities:\n', scores)
print('Labels:\n', label_batch)
plt.figure(figsize=(10, 10))
for i in range(25):
ax = plt.subplot(5, 5, i + 1)
plt.imshow(image_batch[i].astype("uint8"))
plt.title(class_names[predictions[i]])
plt.axis("off");
Probabilities: [1.00000000e+00 1.00000000e+00 1.26377427e-06 1.00000000e+00 9.99999702e-01 9.99752223e-01 1.13389786e-09 9.99871254e-01 6.53493218e-04 1.21684351e-08 8.51761861e-09 1.11187983e-05 2.98427298e-13 9.99984562e-01 9.99994040e-01 2.67258889e-11 8.81878681e-10 9.99327600e-01 9.99999821e-01 9.99990702e-01 3.28491470e-11 1.00000000e+00 9.28917706e-01 1.13656251e-02 1.09751824e-12 1.11083105e-11 9.99988437e-01 9.99986172e-01 1.00000000e+00 1.00000000e+00 6.95624808e-11 1.95843949e-11] Labels: [1 1 0 1 1 1 0 1 0 0 0 0 0 1 1 0 0 1 1 1 0 1 0 0 0 0 1 1 1 1 0 0]
Save Model¶
In [22]:
if False:
# Save the model to HDF5 format
model.save(r'model/goat.h5')