Deep Learning

Practice Projects

P2: Multi-Label Classification

Step 0. Style and Libraries

Let's choose a style of the Jupyter notebook and import the software libraries. The command hide_code will hide the code cells.

In [1]:
%%html
<style>
@import url('https://fonts.googleapis.com/css?family=Orbitron|Roboto');
body {background-color: aliceblue;} 
a {color: #4876ff; font-family: 'Roboto';} 
h1 {color: #348ABD; font-family: 'Orbitron'; text-shadow: 4px 4px 4px #ccc;} 
h2, h3 {color: slategray; font-family: 'Roboto'; text-shadow: 4px 4px 4px #ccc;}
h4 {color: #348ABD; font-family: 'Orbitron';}
span {text-shadow: 4px 4px 4px #ccc;}
div.output_prompt, div.output_area pre {color: slategray;}
div.input_prompt, div.output_subarea {color: #4876ff;}      
div.output_stderr pre {background-color: aliceblue;}  
div.output_stderr {background-color: slategrey;}                        
</style>
<script>
code_show = true; 
function code_display() {
    if (code_show) {
        $('div.input').each(function(id) {
            if (id == 0 || $(this).html().indexOf('hide_code') > -1) {$(this).hide();}
        });
        $('div.output_prompt').css('opacity', 0);
    } else {
        $('div.input').each(function(id) {$(this).show();});
        $('div.output_prompt').css('opacity', 1);
    };
    code_show = !code_show;
} 
$(document).ready(code_display);
</script>
<form action="javascript: code_display()">
<input style="color: #348ABD; background: aliceblue; opacity: 0.8;" \ 
type="submit" value="Click to display or hide code cells">
</form>                  
In [38]:
hide_code = ''
import numpy as np 
import pandas as pd
import tensorflow as tf

from PIL import ImageFile
from tqdm import tqdm
import h5py
import cv2

import matplotlib.pylab as plt
from matplotlib import cm
%matplotlib inline

import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)

from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier

from keras.utils import to_categorical
from keras.preprocessing import image as keras_image
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator

from keras.models import Sequential, load_model, Model
from keras.layers import Input, BatchNormalization
from keras.layers import Dense, LSTM, GlobalAveragePooling1D, GlobalAveragePooling2D
from keras.layers import Activation, Flatten, Dropout, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D
from keras.layers.advanced_activations import PReLU, LeakyReLU

from keras.applications.inception_v3 import InceptionV3, preprocess_input
import scipy
from scipy import misc
In [3]:
hide_code
# Plot the Neural network fitting history
def history_plot(fit_history, n):
    plt.figure(figsize=(18, 12))
    
    plt.subplot(211)
    plt.plot(fit_history.history['loss'][n:], color='slategray', label = 'train')
    plt.plot(fit_history.history['val_loss'][n:], color='#4876ff', label = 'valid')
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.legend()
    plt.title('Loss Function');  
    
    plt.subplot(212)
    plt.plot(fit_history.history['acc'][n:], color='slategray', label = 'train')
    plt.plot(fit_history.history['val_acc'][n:], color='#4876ff', label = 'valid')
    plt.xlabel("Epochs")
    plt.ylabel("Accuracy")    
    plt.legend()
    plt.title('Accuracy');

Step 1. Load and Explore the Data

For this project, I have created the dataset of 1650 (50x33) color images (32x32x3) with 33 handwritten letters.

Run the following cell to download the dataset.

In [10]:
hide_code
# Function for processing an image
def image_to_tensor(img_path):
    img = keras_image.load_img("data/" + img_path, target_size=(32, 32))
    x = keras_image.img_to_array(img)
    return np.expand_dims(x, axis=0)
# Function for creating the data tensor
def data_to_tensor(img_paths):
    list_of_tensors = [image_to_tensor(img_path) for img_path in tqdm(img_paths)]
    return np.vstack(list_of_tensors)

ImageFile.LOAD_TRUNCATED_IMAGES = True 
# Load the data
data = pd.read_csv("data/letters.csv")
files = data['file']
letters = data['letter']
backgrounds = data['background']
targets = data['label'].values
tensors = data_to_tensor(files);
100%|██████████| 1650/1650 [01:02<00:00, 26.27it/s]
In [11]:
hide_code
# Print the shape 
print ('Tensor shape:', tensors.shape)
print ('Target shape', targets.shape)
Tensor shape: (1650, 32, 32, 3)
Target shape (1650,)
In [12]:
hide_code
# Read from files and display images using OpenCV
def display_images(img_path, ax):
    img = cv2.imread("data/" + img_path)
    ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    
fig = plt.figure(figsize=(18, 6))
for i in range(12):
    ax = fig.add_subplot(2, 6, i + 1, xticks=[], yticks=[], title=letters[i*50])
    display_images(files[i*50], ax)

Step 2. Save and Load the Data

The data tensors can be saved in the appropriate format of files .h5.

In [9]:
hide_code
# Create the tensor file
with h5py.File('LetterColorImages.h5', 'w') as f:
    f.create_dataset('images', data = tensors)
    f.create_dataset('labels', data = targets)
    f.create_dataset('backgrounds', data = backgrounds)
    f.close()
In [13]:
hide_code
# Read the h5 file
f = h5py.File('LetterColorImages.h5', 'r')

# List all groups
keys = list(f.keys())
keys
Out[13]:
['backgrounds', 'images', 'labels']
In [14]:
hide_code
# Create tensors and targets
tensors = np.array(f[keys[1]])
targets = np.array(f[keys[2]])
print ('Tensor shape:', tensors.shape)
print ('Target shape', targets.shape)
Tensor shape: (1650, 32, 32, 3)
Target shape (1650,)

Step 3. Implement Preprocess Functions

Normalize and Gray Scale

In the cell below, normalize the image tensors, and return them as a normalized Numpy array.

In [15]:
hide_code
# Normalize the tensors
tensors = tensors.astype('float32')/255
In [16]:
hide_code
# Read and display a tensor using Matplotlib
print('Label: ', letters[100])
plt.figure(figsize=(3,3))
plt.imshow(tensors[100]);
Label:  в

Create tensors of grayscaled images and display their shape.

In [17]:
hide_code
# Grayscaled tensors
gray_tensors = np.dot(tensors[...,:3], [0.299, 0.587, 0.114])
print ('Grayscaled Tensor shape:', gray_tensors.shape)
Grayscaled Tensor shape: (1650, 32, 32)
In [18]:
hide_code
# Read and display a grayscaled tensor using Matplotlib
print('Label: ', letters[100])
plt.figure(figsize=(3,3))
plt.imshow(gray_tensors[100], cmap=cm.bone);
Label:  в

One-hot encode

Now we'll implement the one-hot encoding function to_categorical.

In [19]:
hide_code
# Print the target unique values
print(set(targets))
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33}
In [20]:
hide_code
# One-hot encode the targets, started from the zero label
cat_targets = to_categorical(np.array(targets-1), 33)
cat_targets.shape
Out[20]:
(1650, 33)
In [21]:
hide_code
# One-hot encode the background targets
backgrounds = to_categorical(backgrounds, 2)
backgrounds.shape
Out[21]:
(1650, 2)

Add background

In [22]:
hide_code
# Create multi-label targets
back_targets = np.concatenate((cat_targets, backgrounds), axis=1)
back_targets.shape
Out[22]:
(1650, 35)

Split

Color Images

Apply the function train_test_split and split the data into training and testing sets.

Set up the size for the test set - 10% and for the validation set - 10%.

In [23]:
hide_code
# Split the data
x_train, x_test, y_train, y_test = train_test_split(tensors, cat_targets, 
                                                    test_size = 0.2, 
                                                    random_state = 1)
n = int(len(x_test)/2)
x_valid, y_valid = x_test[:n], y_test[:n]
x_test, y_test = x_test[n:], y_test[n:]
In [24]:
hide_code
# Print the shape
x_train.shape, x_valid.shape, x_test.shape, y_train.shape, y_valid.shape, y_test.shape
Out[24]:
((1320, 32, 32, 3),
 (165, 32, 32, 3),
 (165, 32, 32, 3),
 (1320, 33),
 (165, 33),
 (165, 33))

Grayscaled Images

In [25]:
hide_code
# Split the grayscaled data
x_train2, x_test2, y_train2, y_test2 = train_test_split(gray_tensors, cat_targets, 
                                                        test_size = 0.2, 
                                                        random_state = 1)
x_valid2, y_valid2 = x_test2[:n], y_test2[:n]
x_test2, y_test2 = x_test2[n:], y_test2[n:]
In [26]:
hide_code
# Reshape the grayscaled data
x_train2, x_test2, x_valid2 = \
x_train2.reshape(-1, 32, 32, 1), x_test2.reshape(-1, 32, 32, 1), x_valid2.reshape(-1, 32, 32, 1)
In [27]:
hide_code
# Print the shape
x_train2.shape, x_valid2.shape, x_test2.shape, y_train2.shape, y_valid2.shape, y_test2.shape
Out[27]:
((1320, 32, 32, 1),
 (165, 32, 32, 1),
 (165, 32, 32, 1),
 (1320, 33),
 (165, 33),
 (165, 33))
In [28]:
# Convert images from grayscaled to RGB
x_train2_tensor = tf.image.grayscale_to_rgb(x_train2, name=None)
x_test2_tensor = tf.image.grayscale_to_rgb(x_test2, name=None)
x_valid2_tensor = tf.image.grayscale_to_rgb(x_valid2, name=None)
# Run tensorflow session
sess = tf.Session()
with sess.as_default():
    x_train2_color = x_train2_tensor.eval()
    x_test2_color = x_test2_tensor.eval()
    x_valid2_color = x_valid2_tensor.eval()
# Check the shape    
x_train2_color.shape, x_test2_color.shape, x_valid2_color.shape
Out[28]:
((1320, 32, 32, 3), (165, 32, 32, 3), (165, 32, 32, 3))

Multi-label targets, color images

In [29]:
hide_code
# Split with multi-label targets
x_train3, x_test3, y_train3, y_test3 = train_test_split(tensors, back_targets, 
                                                        test_size = 0.2, 
                                                        random_state = 1)
x_valid3, y_valid3 = x_test3[:n], y_test3[:n]
x_test3, y_test3 = x_test3[n:], y_test3[n:]
In [30]:
hide_code
# Print the shape
x_train3.shape, x_valid3.shape, x_test3.shape, y_train3.shape, y_valid3.shape, y_test3.shape
Out[30]:
((1320, 32, 32, 3),
 (165, 32, 32, 3),
 (165, 32, 32, 3),
 (1320, 35),
 (165, 35),
 (165, 35))
In [31]:
hide_code
# Create a list of targets
y_train3_list = [y_train3[:, :33], y_train3[:, 33:]]
y_test3_list = [y_test3[:, :33], y_valid3[:, 33:]]
y_valid3_list = [y_valid3[:, :33], y_valid3[:, 33:]]

Multi-label targets, grayscaled images

In [32]:
hide_code
# Split the grayscaled data
x_train4, x_test4, y_train4, y_test4 = train_test_split(gray_tensors, back_targets, 
                                                        test_size = 0.2, 
                                                        random_state = 1)
x_valid4, y_valid4 = x_test4[:n], y_test4[:n]
x_test4, y_test4 = x_test4[n:], y_test4[n:]
In [33]:
hide_code
# Reshape the grayscaled data
x_train4, x_test4, x_valid4 = \
x_train4.reshape(-1, 32, 32, 1), x_test4.reshape(-1, 32, 32, 1), x_valid4.reshape(-1, 32, 32, 1)
In [34]:
hide_code
# Print the shape
x_train4.shape, x_valid4.shape, x_test4.shape, y_train4.shape, y_valid4.shape, y_test4.shape
Out[34]:
((1320, 32, 32, 1),
 (165, 32, 32, 1),
 (165, 32, 32, 1),
 (1320, 35),
 (165, 35),
 (165, 35))
In [35]:
hide_code
# Create a list of targets
y_train4_list = [y_train4[:, :33], y_train4[:, 33:]]
y_test4_list = [y_test4[:, :33], y_test4[:, 33:]]
y_valid4_list = [y_valid4[:, :33], y_valid4[:, 33:]]

Step 4. Create a One-Label Classification Model

Color Images

Define a model architecture and compile the model for color images.

In [103]:
hide_code
def model():
    model = Sequential()
    # TODO: Define a model architecture

    model.add(Conv2D(32, (5, 5), padding='same', input_shape=x_train.shape[1:]))
    model.add(LeakyReLU(alpha=0.02))
    
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))

    model.add(Conv2D(196, (5, 5)))
    model.add(LeakyReLU(alpha=0.02))
    
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))

    model.add(GlobalMaxPooling2D())
    
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.02))
    model.add(Dropout(0.5)) 
    
    model.add(Dense(33))
    model.add(Activation('softmax'))
    
    # TODO: Compile the model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    
    return model

model = model()
In [42]:
hide_code
# Create callbacks
checkpointer = ModelCheckpoint(filepath='weights.best.model.hdf5', 
                               verbose=2, save_best_only=True)
lr_reduction = ReduceLROnPlateau(monitor='val_loss', 
                                 patience=5, verbose=2, factor=0.2)
In [43]:
hide_code
# Train the model
history = model.fit(x_train, y_train, 
                    epochs=100, batch_size=64, verbose=2,
                    validation_data=(x_valid, y_valid),
                    callbacks=[checkpointer, lr_reduction])
Train on 1320 samples, validate on 165 samples
Epoch 1/100
 - 10s - loss: 3.5201 - acc: 0.0227 - val_loss: 3.4983 - val_acc: 0.0485

Epoch 00001: val_loss improved from inf to 3.49834, saving model to weights.best.model.hdf5
Epoch 2/100
 - 8s - loss: 3.4917 - acc: 0.0508 - val_loss: 3.4904 - val_acc: 0.0424

Epoch 00002: val_loss improved from 3.49834 to 3.49045, saving model to weights.best.model.hdf5
Epoch 3/100
 - 8s - loss: 3.4649 - acc: 0.0462 - val_loss: 3.4723 - val_acc: 0.0424

Epoch 00003: val_loss improved from 3.49045 to 3.47232, saving model to weights.best.model.hdf5
Epoch 4/100
 - 7s - loss: 3.4662 - acc: 0.0530 - val_loss: 3.4712 - val_acc: 0.0424

Epoch 00004: val_loss improved from 3.47232 to 3.47119, saving model to weights.best.model.hdf5
Epoch 5/100
 - 7s - loss: 3.4512 - acc: 0.0402 - val_loss: 3.4717 - val_acc: 0.0606

Epoch 00005: val_loss did not improve from 3.47119
Epoch 6/100
 - 8s - loss: 3.4468 - acc: 0.0508 - val_loss: 3.4657 - val_acc: 0.0606

Epoch 00006: val_loss improved from 3.47119 to 3.46569, saving model to weights.best.model.hdf5
Epoch 7/100
 - 8s - loss: 3.4481 - acc: 0.0447 - val_loss: 3.4675 - val_acc: 0.0606

Epoch 00007: val_loss did not improve from 3.46569
Epoch 8/100
 - 8s - loss: 3.4386 - acc: 0.0515 - val_loss: 3.4592 - val_acc: 0.0485

Epoch 00008: val_loss improved from 3.46569 to 3.45923, saving model to weights.best.model.hdf5
Epoch 9/100
 - 8s - loss: 3.4385 - acc: 0.0417 - val_loss: 3.4580 - val_acc: 0.0303

Epoch 00009: val_loss improved from 3.45923 to 3.45796, saving model to weights.best.model.hdf5
Epoch 10/100
 - 8s - loss: 3.4324 - acc: 0.0553 - val_loss: 3.4562 - val_acc: 0.0364

Epoch 00010: val_loss improved from 3.45796 to 3.45622, saving model to weights.best.model.hdf5
Epoch 11/100
 - 8s - loss: 3.4335 - acc: 0.0500 - val_loss: 3.4577 - val_acc: 0.0485

Epoch 00011: val_loss did not improve from 3.45622
Epoch 12/100
 - 8s - loss: 3.4205 - acc: 0.0629 - val_loss: 3.4614 - val_acc: 0.0485

Epoch 00012: val_loss did not improve from 3.45622
Epoch 13/100
 - 8s - loss: 3.4191 - acc: 0.0561 - val_loss: 3.4493 - val_acc: 0.0667

Epoch 00013: val_loss improved from 3.45622 to 3.44930, saving model to weights.best.model.hdf5
Epoch 14/100
 - 8s - loss: 3.4138 - acc: 0.0591 - val_loss: 3.4446 - val_acc: 0.0727

Epoch 00014: val_loss improved from 3.44930 to 3.44458, saving model to weights.best.model.hdf5
Epoch 15/100
 - 9s - loss: 3.4139 - acc: 0.0667 - val_loss: 3.4452 - val_acc: 0.0545

Epoch 00015: val_loss did not improve from 3.44458
Epoch 16/100
 - 9s - loss: 3.3961 - acc: 0.0652 - val_loss: 3.4531 - val_acc: 0.0424

Epoch 00016: val_loss did not improve from 3.44458
Epoch 17/100
 - 8s - loss: 3.3981 - acc: 0.0629 - val_loss: 3.4430 - val_acc: 0.0667

Epoch 00017: val_loss improved from 3.44458 to 3.44303, saving model to weights.best.model.hdf5
Epoch 18/100
 - 8s - loss: 3.3847 - acc: 0.0727 - val_loss: 3.4330 - val_acc: 0.0727

Epoch 00018: val_loss improved from 3.44303 to 3.43303, saving model to weights.best.model.hdf5
Epoch 19/100
 - 8s - loss: 3.3755 - acc: 0.0674 - val_loss: 3.4248 - val_acc: 0.0788

Epoch 00019: val_loss improved from 3.43303 to 3.42481, saving model to weights.best.model.hdf5
Epoch 20/100
 - 8s - loss: 3.3623 - acc: 0.0652 - val_loss: 3.4440 - val_acc: 0.0606

Epoch 00020: val_loss did not improve from 3.42481
Epoch 21/100
 - 8s - loss: 3.3549 - acc: 0.0712 - val_loss: 3.4454 - val_acc: 0.0485

Epoch 00021: val_loss did not improve from 3.42481
Epoch 22/100
 - 8s - loss: 3.3607 - acc: 0.0773 - val_loss: 3.4141 - val_acc: 0.0788

Epoch 00022: val_loss improved from 3.42481 to 3.41411, saving model to weights.best.model.hdf5
Epoch 23/100
 - 7s - loss: 3.3451 - acc: 0.0788 - val_loss: 3.4014 - val_acc: 0.0727

Epoch 00023: val_loss improved from 3.41411 to 3.40135, saving model to weights.best.model.hdf5
Epoch 24/100
 - 7s - loss: 3.3383 - acc: 0.0689 - val_loss: 3.4434 - val_acc: 0.0606

Epoch 00024: val_loss did not improve from 3.40135
Epoch 25/100
 - 7s - loss: 3.3319 - acc: 0.0811 - val_loss: 3.4040 - val_acc: 0.0727

Epoch 00025: val_loss did not improve from 3.40135
Epoch 26/100
 - 8s - loss: 3.2931 - acc: 0.0811 - val_loss: 3.4176 - val_acc: 0.0727

Epoch 00026: val_loss did not improve from 3.40135
Epoch 27/100
 - 8s - loss: 3.2726 - acc: 0.0871 - val_loss: 3.4053 - val_acc: 0.0848

Epoch 00027: val_loss did not improve from 3.40135
Epoch 28/100
 - 8s - loss: 3.2587 - acc: 0.0917 - val_loss: 3.4018 - val_acc: 0.0667

Epoch 00028: val_loss did not improve from 3.40135

Epoch 00028: ReduceLROnPlateau reducing learning rate to 0.00020000000949949026.
Epoch 29/100
 - 8s - loss: 3.2416 - acc: 0.0939 - val_loss: 3.3679 - val_acc: 0.1030

Epoch 00029: val_loss improved from 3.40135 to 3.36793, saving model to weights.best.model.hdf5
Epoch 30/100
 - 8s - loss: 3.2222 - acc: 0.1015 - val_loss: 3.3442 - val_acc: 0.0970

Epoch 00030: val_loss improved from 3.36793 to 3.34420, saving model to weights.best.model.hdf5
Epoch 31/100
 - 8s - loss: 3.2247 - acc: 0.1015 - val_loss: 3.3797 - val_acc: 0.0788

Epoch 00031: val_loss did not improve from 3.34420
Epoch 32/100
 - 8s - loss: 3.1923 - acc: 0.1091 - val_loss: 3.3322 - val_acc: 0.1030

Epoch 00032: val_loss improved from 3.34420 to 3.33220, saving model to weights.best.model.hdf5
Epoch 33/100
 - 8s - loss: 3.1903 - acc: 0.1098 - val_loss: 3.3333 - val_acc: 0.0909

Epoch 00033: val_loss did not improve from 3.33220
Epoch 34/100
 - 8s - loss: 3.1905 - acc: 0.1038 - val_loss: 3.3252 - val_acc: 0.1030

Epoch 00034: val_loss improved from 3.33220 to 3.32515, saving model to weights.best.model.hdf5
Epoch 35/100
 - 7s - loss: 3.1664 - acc: 0.1114 - val_loss: 3.3170 - val_acc: 0.1030

Epoch 00035: val_loss improved from 3.32515 to 3.31699, saving model to weights.best.model.hdf5
Epoch 36/100
 - 7s - loss: 3.1519 - acc: 0.1182 - val_loss: 3.3145 - val_acc: 0.0970

Epoch 00036: val_loss improved from 3.31699 to 3.31450, saving model to weights.best.model.hdf5
Epoch 37/100
 - 8s - loss: 3.1524 - acc: 0.1174 - val_loss: 3.3100 - val_acc: 0.0970

Epoch 00037: val_loss improved from 3.31450 to 3.30996, saving model to weights.best.model.hdf5
Epoch 38/100
 - 8s - loss: 3.1399 - acc: 0.1076 - val_loss: 3.3012 - val_acc: 0.1030

Epoch 00038: val_loss improved from 3.30996 to 3.30118, saving model to weights.best.model.hdf5
Epoch 39/100
 - 8s - loss: 3.1376 - acc: 0.1121 - val_loss: 3.3031 - val_acc: 0.1091

Epoch 00039: val_loss did not improve from 3.30118
Epoch 40/100
 - 9s - loss: 3.1335 - acc: 0.1144 - val_loss: 3.2867 - val_acc: 0.1091

Epoch 00040: val_loss improved from 3.30118 to 3.28671, saving model to weights.best.model.hdf5
Epoch 41/100
 - 8s - loss: 3.1253 - acc: 0.1197 - val_loss: 3.3013 - val_acc: 0.0848

Epoch 00041: val_loss did not improve from 3.28671
Epoch 42/100
 - 8s - loss: 3.1173 - acc: 0.1167 - val_loss: 3.2819 - val_acc: 0.1212

Epoch 00042: val_loss improved from 3.28671 to 3.28192, saving model to weights.best.model.hdf5
Epoch 43/100
 - 9s - loss: 3.1189 - acc: 0.1098 - val_loss: 3.2723 - val_acc: 0.1091

Epoch 00043: val_loss improved from 3.28192 to 3.27230, saving model to weights.best.model.hdf5
Epoch 44/100
 - 9s - loss: 3.1148 - acc: 0.1235 - val_loss: 3.2689 - val_acc: 0.1091

Epoch 00044: val_loss improved from 3.27230 to 3.26891, saving model to weights.best.model.hdf5
Epoch 45/100
 - 9s - loss: 3.1080 - acc: 0.1152 - val_loss: 3.2593 - val_acc: 0.1273

Epoch 00045: val_loss improved from 3.26891 to 3.25933, saving model to weights.best.model.hdf5
Epoch 46/100
 - 9s - loss: 3.1063 - acc: 0.1174 - val_loss: 3.2591 - val_acc: 0.1030

Epoch 00046: val_loss improved from 3.25933 to 3.25909, saving model to weights.best.model.hdf5
Epoch 47/100
 - 9s - loss: 3.0857 - acc: 0.1152 - val_loss: 3.2615 - val_acc: 0.1091

Epoch 00047: val_loss did not improve from 3.25909
Epoch 48/100
 - 9s - loss: 3.0739 - acc: 0.1250 - val_loss: 3.2476 - val_acc: 0.0909

Epoch 00048: val_loss improved from 3.25909 to 3.24761, saving model to weights.best.model.hdf5
Epoch 49/100
 - 9s - loss: 3.0791 - acc: 0.1205 - val_loss: 3.2482 - val_acc: 0.0970

Epoch 00049: val_loss did not improve from 3.24761
Epoch 50/100
 - 8s - loss: 3.0794 - acc: 0.1197 - val_loss: 3.2367 - val_acc: 0.1030

Epoch 00050: val_loss improved from 3.24761 to 3.23672, saving model to weights.best.model.hdf5
Epoch 51/100
 - 9s - loss: 3.0709 - acc: 0.1318 - val_loss: 3.2365 - val_acc: 0.1152

Epoch 00051: val_loss improved from 3.23672 to 3.23647, saving model to weights.best.model.hdf5
Epoch 52/100
 - 9s - loss: 3.0574 - acc: 0.1348 - val_loss: 3.2403 - val_acc: 0.1152

Epoch 00052: val_loss did not improve from 3.23647
Epoch 53/100
 - 9s - loss: 3.0523 - acc: 0.1197 - val_loss: 3.2325 - val_acc: 0.0909

Epoch 00053: val_loss improved from 3.23647 to 3.23253, saving model to weights.best.model.hdf5
Epoch 54/100
 - 8s - loss: 3.0538 - acc: 0.1280 - val_loss: 3.2384 - val_acc: 0.1212

Epoch 00054: val_loss did not improve from 3.23253
Epoch 55/100
 - 8s - loss: 3.0487 - acc: 0.1205 - val_loss: 3.2269 - val_acc: 0.1091

Epoch 00055: val_loss improved from 3.23253 to 3.22688, saving model to weights.best.model.hdf5
Epoch 56/100
 - 8s - loss: 3.0402 - acc: 0.1288 - val_loss: 3.2153 - val_acc: 0.1091

Epoch 00056: val_loss improved from 3.22688 to 3.21533, saving model to weights.best.model.hdf5
Epoch 57/100
 - 8s - loss: 3.0384 - acc: 0.1235 - val_loss: 3.2172 - val_acc: 0.0909

Epoch 00057: val_loss did not improve from 3.21533
Epoch 58/100
 - 8s - loss: 3.0081 - acc: 0.1409 - val_loss: 3.2083 - val_acc: 0.1030

Epoch 00058: val_loss improved from 3.21533 to 3.20834, saving model to weights.best.model.hdf5
Epoch 59/100
 - 8s - loss: 3.0188 - acc: 0.1326 - val_loss: 3.2044 - val_acc: 0.1091

Epoch 00059: val_loss improved from 3.20834 to 3.20440, saving model to weights.best.model.hdf5
Epoch 60/100
 - 8s - loss: 3.0208 - acc: 0.1394 - val_loss: 3.2038 - val_acc: 0.0970

Epoch 00060: val_loss improved from 3.20440 to 3.20385, saving model to weights.best.model.hdf5
Epoch 61/100
 - 8s - loss: 3.0156 - acc: 0.1333 - val_loss: 3.2419 - val_acc: 0.1152

Epoch 00061: val_loss did not improve from 3.20385
Epoch 62/100
 - 8s - loss: 3.0142 - acc: 0.1280 - val_loss: 3.1889 - val_acc: 0.1030

Epoch 00062: val_loss improved from 3.20385 to 3.18891, saving model to weights.best.model.hdf5
Epoch 63/100
 - 10s - loss: 3.0066 - acc: 0.1432 - val_loss: 3.1827 - val_acc: 0.0970

Epoch 00063: val_loss improved from 3.18891 to 3.18274, saving model to weights.best.model.hdf5
Epoch 64/100
 - 9s - loss: 3.0055 - acc: 0.1288 - val_loss: 3.1993 - val_acc: 0.0788

Epoch 00064: val_loss did not improve from 3.18274
Epoch 65/100
 - 9s - loss: 3.0144 - acc: 0.1242 - val_loss: 3.1889 - val_acc: 0.0970

Epoch 00065: val_loss did not improve from 3.18274
Epoch 66/100
 - 9s - loss: 2.9995 - acc: 0.1356 - val_loss: 3.1720 - val_acc: 0.1030

Epoch 00066: val_loss improved from 3.18274 to 3.17200, saving model to weights.best.model.hdf5
Epoch 67/100
 - 9s - loss: 3.0004 - acc: 0.1288 - val_loss: 3.1702 - val_acc: 0.1030

Epoch 00067: val_loss improved from 3.17200 to 3.17022, saving model to weights.best.model.hdf5
Epoch 68/100
 - 9s - loss: 2.9619 - acc: 0.1492 - val_loss: 3.1672 - val_acc: 0.1030

Epoch 00068: val_loss improved from 3.17022 to 3.16721, saving model to weights.best.model.hdf5
Epoch 69/100
 - 8s - loss: 2.9639 - acc: 0.1432 - val_loss: 3.1649 - val_acc: 0.0909

Epoch 00069: val_loss improved from 3.16721 to 3.16487, saving model to weights.best.model.hdf5
Epoch 70/100
 - 9s - loss: 2.9454 - acc: 0.1477 - val_loss: 3.1578 - val_acc: 0.0848

Epoch 00070: val_loss improved from 3.16487 to 3.15780, saving model to weights.best.model.hdf5
Epoch 71/100
 - 10s - loss: 2.9482 - acc: 0.1477 - val_loss: 3.1838 - val_acc: 0.1091

Epoch 00071: val_loss did not improve from 3.15780
Epoch 72/100
 - 10s - loss: 2.9406 - acc: 0.1538 - val_loss: 3.1574 - val_acc: 0.1152

Epoch 00072: val_loss improved from 3.15780 to 3.15739, saving model to weights.best.model.hdf5
Epoch 73/100
 - 10s - loss: 2.9213 - acc: 0.1568 - val_loss: 3.1482 - val_acc: 0.1212

Epoch 00073: val_loss improved from 3.15739 to 3.14815, saving model to weights.best.model.hdf5
Epoch 74/100
 - 9s - loss: 2.9399 - acc: 0.1432 - val_loss: 3.1453 - val_acc: 0.0909

Epoch 00074: val_loss improved from 3.14815 to 3.14531, saving model to weights.best.model.hdf5
Epoch 75/100
 - 10s - loss: 2.9407 - acc: 0.1538 - val_loss: 3.1467 - val_acc: 0.0970

Epoch 00075: val_loss did not improve from 3.14531
Epoch 76/100
 - 11s - loss: 2.9224 - acc: 0.1568 - val_loss: 3.1218 - val_acc: 0.1091

Epoch 00076: val_loss improved from 3.14531 to 3.12185, saving model to weights.best.model.hdf5
Epoch 77/100
 - 10s - loss: 2.9214 - acc: 0.1614 - val_loss: 3.1211 - val_acc: 0.1091

Epoch 00077: val_loss improved from 3.12185 to 3.12109, saving model to weights.best.model.hdf5
Epoch 78/100
 - 10s - loss: 2.8997 - acc: 0.1636 - val_loss: 3.1223 - val_acc: 0.1273

Epoch 00078: val_loss did not improve from 3.12109
Epoch 79/100
 - 11s - loss: 2.8893 - acc: 0.1568 - val_loss: 3.1317 - val_acc: 0.0970

Epoch 00079: val_loss did not improve from 3.12109
Epoch 80/100
 - 9s - loss: 2.9002 - acc: 0.1545 - val_loss: 3.1156 - val_acc: 0.1030

Epoch 00080: val_loss improved from 3.12109 to 3.11562, saving model to weights.best.model.hdf5
Epoch 81/100
 - 10s - loss: 2.8672 - acc: 0.1712 - val_loss: 3.0986 - val_acc: 0.1091

Epoch 00081: val_loss improved from 3.11562 to 3.09865, saving model to weights.best.model.hdf5
Epoch 82/100
 - 10s - loss: 2.8718 - acc: 0.1591 - val_loss: 3.0918 - val_acc: 0.1394

Epoch 00082: val_loss improved from 3.09865 to 3.09185, saving model to weights.best.model.hdf5
Epoch 83/100
 - 9s - loss: 2.8476 - acc: 0.1697 - val_loss: 3.1044 - val_acc: 0.1152

Epoch 00083: val_loss did not improve from 3.09185
Epoch 84/100
 - 10s - loss: 2.8657 - acc: 0.1591 - val_loss: 3.1080 - val_acc: 0.0970

Epoch 00084: val_loss did not improve from 3.09185
Epoch 85/100
 - 10s - loss: 2.8438 - acc: 0.1735 - val_loss: 3.0800 - val_acc: 0.1152

Epoch 00085: val_loss improved from 3.09185 to 3.08000, saving model to weights.best.model.hdf5
Epoch 86/100
 - 10s - loss: 2.8553 - acc: 0.1720 - val_loss: 3.0786 - val_acc: 0.1273

Epoch 00086: val_loss improved from 3.08000 to 3.07857, saving model to weights.best.model.hdf5
Epoch 87/100
 - 9s - loss: 2.8386 - acc: 0.1841 - val_loss: 3.0681 - val_acc: 0.1273

Epoch 00087: val_loss improved from 3.07857 to 3.06815, saving model to weights.best.model.hdf5
Epoch 88/100
 - 9s - loss: 2.8228 - acc: 0.1886 - val_loss: 3.0691 - val_acc: 0.1212

Epoch 00088: val_loss did not improve from 3.06815
Epoch 89/100
 - 10s - loss: 2.8100 - acc: 0.1765 - val_loss: 3.0578 - val_acc: 0.1333

Epoch 00089: val_loss improved from 3.06815 to 3.05782, saving model to weights.best.model.hdf5
Epoch 90/100
 - 8s - loss: 2.8039 - acc: 0.2023 - val_loss: 3.0521 - val_acc: 0.1152

Epoch 00090: val_loss improved from 3.05782 to 3.05209, saving model to weights.best.model.hdf5
Epoch 91/100
 - 8s - loss: 2.7849 - acc: 0.1841 - val_loss: 3.0445 - val_acc: 0.1394

Epoch 00091: val_loss improved from 3.05209 to 3.04454, saving model to weights.best.model.hdf5
Epoch 92/100
 - 8s - loss: 2.7856 - acc: 0.2000 - val_loss: 3.0348 - val_acc: 0.1394

Epoch 00092: val_loss improved from 3.04454 to 3.03481, saving model to weights.best.model.hdf5
Epoch 93/100
 - 7s - loss: 2.7538 - acc: 0.1970 - val_loss: 3.0295 - val_acc: 0.1515

Epoch 00093: val_loss improved from 3.03481 to 3.02948, saving model to weights.best.model.hdf5
Epoch 94/100
 - 8s - loss: 2.7741 - acc: 0.2045 - val_loss: 3.0160 - val_acc: 0.1576

Epoch 00094: val_loss improved from 3.02948 to 3.01598, saving model to weights.best.model.hdf5
Epoch 95/100
 - 8s - loss: 2.7349 - acc: 0.2061 - val_loss: 3.0112 - val_acc: 0.1636

Epoch 00095: val_loss improved from 3.01598 to 3.01119, saving model to weights.best.model.hdf5
Epoch 96/100
 - 8s - loss: 2.7266 - acc: 0.2030 - val_loss: 3.0091 - val_acc: 0.1515

Epoch 00096: val_loss improved from 3.01119 to 3.00910, saving model to weights.best.model.hdf5
Epoch 97/100
 - 8s - loss: 2.7285 - acc: 0.2167 - val_loss: 3.0110 - val_acc: 0.1576

Epoch 00097: val_loss did not improve from 3.00910
Epoch 98/100
 - 9s - loss: 2.7261 - acc: 0.2129 - val_loss: 2.9846 - val_acc: 0.1818

Epoch 00098: val_loss improved from 3.00910 to 2.98458, saving model to weights.best.model.hdf5
Epoch 99/100
 - 9s - loss: 2.6906 - acc: 0.2265 - val_loss: 2.9615 - val_acc: 0.1455

Epoch 00099: val_loss improved from 2.98458 to 2.96147, saving model to weights.best.model.hdf5
Epoch 100/100
 - 9s - loss: 2.6769 - acc: 0.2273 - val_loss: 2.9679 - val_acc: 0.1697

Epoch 00100: val_loss did not improve from 2.96147

We should have an accuracy greater than 3%

In [44]:
hide_code
# Load the model with the best validation accuracy
model.load_weights('weights.best.model.hdf5')
# Calculate classification accuracy on the testing set
score = model.evaluate(x_test, y_test)
score
165/165 [==============================] - 0s 3ms/step
Out[44]:
[2.9150874094529584, 0.15757575757575756]

Apply the ImageDataGenerator() function.

In [45]:
hide_code
# Fit the model with ImageDataGenerator()
steps, epochs = 1000, 5
data_generator = ImageDataGenerator(zoom_range=0.2, shear_range=0.2, rotation_range=20)

generator = model.fit_generator(data_generator.flow(x_train, y_train, batch_size=64),
                                steps_per_epoch = steps, epochs = epochs,
                                validation_data = (x_valid, y_valid), 
                                callbacks=[checkpointer, lr_reduction], verbose=2)
Epoch 1/5
 - 452s - loss: 2.5762 - acc: 0.2427 - val_loss: 2.6819 - val_acc: 0.2242

Epoch 00001: val_loss improved from 2.96147 to 2.68193, saving model to weights.best.model.hdf5
Epoch 2/5
 - 461s - loss: 2.2461 - acc: 0.3352 - val_loss: 2.4237 - val_acc: 0.2909

Epoch 00002: val_loss improved from 2.68193 to 2.42368, saving model to weights.best.model.hdf5
Epoch 3/5
 - 418s - loss: 1.9200 - acc: 0.4279 - val_loss: 2.0991 - val_acc: 0.3758

Epoch 00003: val_loss improved from 2.42368 to 2.09913, saving model to weights.best.model.hdf5
Epoch 4/5
 - 425s - loss: 1.6372 - acc: 0.5038 - val_loss: 1.9231 - val_acc: 0.4242

Epoch 00004: val_loss improved from 2.09913 to 1.92313, saving model to weights.best.model.hdf5
Epoch 5/5
 - 380s - loss: 1.4137 - acc: 0.5626 - val_loss: 1.7656 - val_acc: 0.4303

Epoch 00005: val_loss improved from 1.92313 to 1.76556, saving model to weights.best.model.hdf5
In [105]:
hide_code
# Load the model with the best validation accuracy
model.load_weights('weights.best.model.hdf5')
# Calculate classification accuracy on the testing set
score = model.evaluate(x_test, y_test)
score
165/165 [==============================] - 5s 31ms/step
Out[105]:
[1.6711222280155529, 0.4666666668472868]

Let's compare the results with classifying algorithms.

In [47]:
hide_code
# Fit the classifier and get the accuracy score
y_train_c = np.array([np.argmax(y) for y in y_train])
y_test_c = np.array([np.argmax(y) for y in y_test])
clf = GradientBoostingClassifier().fit(x_train.reshape(-1, 32*32*3), y_train_c)
clf.score(x_test.reshape(-1, 32*32*3), y_test_c)
Out[47]:
0.10303030303030303
In [48]:
hide_code
# Fit the classifier and get the accuracy score
clf2 = RandomForestClassifier().fit(x_train.reshape(-1, 32*32*3), y_train_c)
clf2.score(x_test.reshape(-1, 32*32*3), y_test_c)
Out[48]:
0.09090909090909091

Grayscaled Images

In [106]:
hide_code
def gray_model():
    model = Sequential()
    # TODO: Define a model architecture

    model.add(Conv2D(32, (5, 5), padding='same', input_shape=x_train2.shape[1:]))
    model.add(LeakyReLU(alpha=0.02))    
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(128, (5, 5)))
    model.add(LeakyReLU(alpha=0.02))    
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    
    model.add(GlobalMaxPooling2D()) 
    
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.02)) 
    model.add(Dropout(0.25)) 
    
    model.add(Dense(128))
    model.add(LeakyReLU(alpha=0.02)) 
    model.add(Dropout(0.25))    

    model.add(Dense(33))
    model.add(Activation('softmax'))
    
    # TODO: Compile the model
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    
    return model

gray_model = gray_model()
In [64]:
hide_code
# Create callbacks
gray_checkpointer = ModelCheckpoint(filepath='weights.best.gray_model.hdf5', 
                                    verbose=2, save_best_only=True)
gray_lr_reduction = ReduceLROnPlateau(monitor='val_loss', 
                                      patience=10, verbose=2, factor=0.8)
In [65]:
hide_code
# Train the model
gray_history = gray_model.fit(x_train2, y_train2, 
                              epochs=200, batch_size=64, verbose=0,
                              validation_data=(x_valid2, y_valid2),
                              callbacks=[gray_checkpointer, gray_lr_reduction])
Epoch 00001: val_loss improved from inf to 3.49760, saving model to weights.best.gray_model.hdf5

Epoch 00002: val_loss did not improve from 3.49760

Epoch 00003: val_loss did not improve from 3.49760

Epoch 00004: val_loss did not improve from 3.49760

Epoch 00005: val_loss did not improve from 3.49760

Epoch 00006: val_loss improved from 3.49760 to 3.49566, saving model to weights.best.gray_model.hdf5

Epoch 00007: val_loss improved from 3.49566 to 3.49229, saving model to weights.best.gray_model.hdf5

Epoch 00008: val_loss did not improve from 3.49229

Epoch 00009: val_loss improved from 3.49229 to 3.48260, saving model to weights.best.gray_model.hdf5

Epoch 00010: val_loss improved from 3.48260 to 3.47657, saving model to weights.best.gray_model.hdf5

Epoch 00011: val_loss improved from 3.47657 to 3.45542, saving model to weights.best.gray_model.hdf5

Epoch 00012: val_loss did not improve from 3.45542

Epoch 00013: val_loss did not improve from 3.45542

Epoch 00014: val_loss improved from 3.45542 to 3.45171, saving model to weights.best.gray_model.hdf5

Epoch 00015: val_loss did not improve from 3.45171

Epoch 00016: val_loss improved from 3.45171 to 3.42426, saving model to weights.best.gray_model.hdf5

Epoch 00017: val_loss did not improve from 3.42426

Epoch 00018: val_loss improved from 3.42426 to 3.41524, saving model to weights.best.gray_model.hdf5

Epoch 00019: val_loss did not improve from 3.41524

Epoch 00020: val_loss did not improve from 3.41524

Epoch 00021: val_loss improved from 3.41524 to 3.40510, saving model to weights.best.gray_model.hdf5

Epoch 00022: val_loss did not improve from 3.40510

Epoch 00023: val_loss did not improve from 3.40510

Epoch 00024: val_loss did not improve from 3.40510

Epoch 00025: val_loss did not improve from 3.40510

Epoch 00026: val_loss did not improve from 3.40510

Epoch 00027: val_loss improved from 3.40510 to 3.38236, saving model to weights.best.gray_model.hdf5

Epoch 00028: val_loss improved from 3.38236 to 3.32602, saving model to weights.best.gray_model.hdf5

Epoch 00029: val_loss did not improve from 3.32602

Epoch 00030: val_loss did not improve from 3.32602

Epoch 00031: val_loss did not improve from 3.32602

Epoch 00032: val_loss improved from 3.32602 to 3.31155, saving model to weights.best.gray_model.hdf5

Epoch 00033: val_loss improved from 3.31155 to 3.22358, saving model to weights.best.gray_model.hdf5

Epoch 00034: val_loss did not improve from 3.22358

Epoch 00035: val_loss did not improve from 3.22358

Epoch 00036: val_loss did not improve from 3.22358

Epoch 00037: val_loss did not improve from 3.22358

Epoch 00038: val_loss improved from 3.22358 to 3.13291, saving model to weights.best.gray_model.hdf5

Epoch 00039: val_loss improved from 3.13291 to 3.05173, saving model to weights.best.gray_model.hdf5

Epoch 00040: val_loss did not improve from 3.05173

Epoch 00041: val_loss improved from 3.05173 to 3.01524, saving model to weights.best.gray_model.hdf5

Epoch 00042: val_loss did not improve from 3.01524

Epoch 00043: val_loss improved from 3.01524 to 2.89881, saving model to weights.best.gray_model.hdf5

Epoch 00044: val_loss did not improve from 2.89881

Epoch 00045: val_loss did not improve from 2.89881

Epoch 00046: val_loss improved from 2.89881 to 2.84637, saving model to weights.best.gray_model.hdf5

Epoch 00047: val_loss did not improve from 2.84637

Epoch 00048: val_loss improved from 2.84637 to 2.77642, saving model to weights.best.gray_model.hdf5

Epoch 00049: val_loss did not improve from 2.77642

Epoch 00050: val_loss did not improve from 2.77642

Epoch 00051: val_loss improved from 2.77642 to 2.76648, saving model to weights.best.gray_model.hdf5

Epoch 00052: val_loss did not improve from 2.76648

Epoch 00053: val_loss improved from 2.76648 to 2.70067, saving model to weights.best.gray_model.hdf5

Epoch 00054: val_loss did not improve from 2.70067

Epoch 00055: val_loss did not improve from 2.70067

Epoch 00056: val_loss did not improve from 2.70067

Epoch 00057: val_loss improved from 2.70067 to 2.64233, saving model to weights.best.gray_model.hdf5

Epoch 00058: val_loss improved from 2.64233 to 2.50863, saving model to weights.best.gray_model.hdf5

Epoch 00059: val_loss did not improve from 2.50863

Epoch 00060: val_loss did not improve from 2.50863

Epoch 00061: val_loss improved from 2.50863 to 2.50505, saving model to weights.best.gray_model.hdf5

Epoch 00062: val_loss did not improve from 2.50505

Epoch 00063: val_loss did not improve from 2.50505

Epoch 00064: val_loss did not improve from 2.50505

Epoch 00065: val_loss improved from 2.50505 to 2.41275, saving model to weights.best.gray_model.hdf5

Epoch 00066: val_loss improved from 2.41275 to 2.38365, saving model to weights.best.gray_model.hdf5

Epoch 00067: val_loss did not improve from 2.38365

Epoch 00068: val_loss improved from 2.38365 to 2.25207, saving model to weights.best.gray_model.hdf5

Epoch 00069: val_loss did not improve from 2.25207

Epoch 00070: val_loss did not improve from 2.25207

Epoch 00071: val_loss did not improve from 2.25207

Epoch 00072: val_loss did not improve from 2.25207

Epoch 00073: val_loss did not improve from 2.25207

Epoch 00074: val_loss did not improve from 2.25207

Epoch 00075: val_loss improved from 2.25207 to 2.14117, saving model to weights.best.gray_model.hdf5

Epoch 00076: val_loss improved from 2.14117 to 2.12025, saving model to weights.best.gray_model.hdf5

Epoch 00077: val_loss did not improve from 2.12025

Epoch 00078: val_loss did not improve from 2.12025

Epoch 00079: val_loss did not improve from 2.12025

Epoch 00080: val_loss did not improve from 2.12025

Epoch 00081: val_loss did not improve from 2.12025

Epoch 00082: val_loss did not improve from 2.12025

Epoch 00083: val_loss improved from 2.12025 to 2.06482, saving model to weights.best.gray_model.hdf5

Epoch 00084: val_loss did not improve from 2.06482

Epoch 00085: val_loss improved from 2.06482 to 1.99549, saving model to weights.best.gray_model.hdf5

Epoch 00086: val_loss improved from 1.99549 to 1.93578, saving model to weights.best.gray_model.hdf5

Epoch 00087: val_loss did not improve from 1.93578

Epoch 00088: val_loss improved from 1.93578 to 1.92795, saving model to weights.best.gray_model.hdf5

Epoch 00089: val_loss did not improve from 1.92795

Epoch 00090: val_loss improved from 1.92795 to 1.88600, saving model to weights.best.gray_model.hdf5

Epoch 00091: val_loss improved from 1.88600 to 1.77680, saving model to weights.best.gray_model.hdf5

Epoch 00092: val_loss did not improve from 1.77680

Epoch 00093: val_loss did not improve from 1.77680

Epoch 00094: val_loss improved from 1.77680 to 1.75655, saving model to weights.best.gray_model.hdf5

Epoch 00095: val_loss did not improve from 1.75655

Epoch 00096: val_loss did not improve from 1.75655

Epoch 00097: val_loss did not improve from 1.75655

Epoch 00098: val_loss improved from 1.75655 to 1.69111, saving model to weights.best.gray_model.hdf5

Epoch 00099: val_loss did not improve from 1.69111

Epoch 00100: val_loss did not improve from 1.69111

Epoch 00101: val_loss did not improve from 1.69111

Epoch 00102: val_loss did not improve from 1.69111

Epoch 00103: val_loss did not improve from 1.69111

Epoch 00104: val_loss did not improve from 1.69111

Epoch 00105: val_loss did not improve from 1.69111

Epoch 00106: val_loss did not improve from 1.69111

Epoch 00107: val_loss improved from 1.69111 to 1.59031, saving model to weights.best.gray_model.hdf5

Epoch 00108: val_loss did not improve from 1.59031

Epoch 00109: val_loss did not improve from 1.59031

Epoch 00110: val_loss improved from 1.59031 to 1.55987, saving model to weights.best.gray_model.hdf5

Epoch 00111: val_loss did not improve from 1.55987

Epoch 00112: val_loss improved from 1.55987 to 1.54122, saving model to weights.best.gray_model.hdf5

Epoch 00113: val_loss did not improve from 1.54122

Epoch 00114: val_loss did not improve from 1.54122

Epoch 00115: val_loss did not improve from 1.54122

Epoch 00116: val_loss did not improve from 1.54122

Epoch 00117: val_loss did not improve from 1.54122

Epoch 00118: val_loss improved from 1.54122 to 1.50671, saving model to weights.best.gray_model.hdf5

Epoch 00119: val_loss did not improve from 1.50671

Epoch 00120: val_loss did not improve from 1.50671

Epoch 00121: val_loss did not improve from 1.50671

Epoch 00122: val_loss did not improve from 1.50671

Epoch 00123: val_loss did not improve from 1.50671

Epoch 00124: val_loss improved from 1.50671 to 1.43768, saving model to weights.best.gray_model.hdf5

Epoch 00125: val_loss did not improve from 1.43768

Epoch 00126: val_loss did not improve from 1.43768

Epoch 00127: val_loss did not improve from 1.43768

Epoch 00128: val_loss did not improve from 1.43768

Epoch 00129: val_loss did not improve from 1.43768

Epoch 00130: val_loss did not improve from 1.43768

Epoch 00131: val_loss did not improve from 1.43768

Epoch 00132: val_loss improved from 1.43768 to 1.41228, saving model to weights.best.gray_model.hdf5

Epoch 00133: val_loss did not improve from 1.41228

Epoch 00134: val_loss did not improve from 1.41228

Epoch 00135: val_loss improved from 1.41228 to 1.34279, saving model to weights.best.gray_model.hdf5

Epoch 00136: val_loss did not improve from 1.34279

Epoch 00137: val_loss did not improve from 1.34279

Epoch 00138: val_loss did not improve from 1.34279

Epoch 00139: val_loss did not improve from 1.34279

Epoch 00140: val_loss did not improve from 1.34279

Epoch 00141: val_loss did not improve from 1.34279

Epoch 00142: val_loss did not improve from 1.34279

Epoch 00143: val_loss improved from 1.34279 to 1.30307, saving model to weights.best.gray_model.hdf5

Epoch 00144: val_loss did not improve from 1.30307

Epoch 00145: val_loss did not improve from 1.30307

Epoch 00146: val_loss did not improve from 1.30307

Epoch 00147: val_loss did not improve from 1.30307

Epoch 00148: val_loss did not improve from 1.30307

Epoch 00149: val_loss did not improve from 1.30307

Epoch 00150: val_loss did not improve from 1.30307

Epoch 00151: val_loss did not improve from 1.30307

Epoch 00152: val_loss did not improve from 1.30307

Epoch 00153: val_loss did not improve from 1.30307

Epoch 00153: ReduceLROnPlateau reducing learning rate to 0.000800000037997961.

Epoch 00154: val_loss improved from 1.30307 to 1.23645, saving model to weights.best.gray_model.hdf5

Epoch 00155: val_loss did not improve from 1.23645

Epoch 00156: val_loss did not improve from 1.23645

Epoch 00157: val_loss did not improve from 1.23645

Epoch 00158: val_loss did not improve from 1.23645

Epoch 00159: val_loss did not improve from 1.23645

Epoch 00160: val_loss did not improve from 1.23645

Epoch 00161: val_loss did not improve from 1.23645

Epoch 00162: val_loss did not improve from 1.23645

Epoch 00163: val_loss did not improve from 1.23645

Epoch 00164: val_loss improved from 1.23645 to 1.20559, saving model to weights.best.gray_model.hdf5

Epoch 00165: val_loss did not improve from 1.20559

Epoch 00166: val_loss did not improve from 1.20559

Epoch 00167: val_loss did not improve from 1.20559

Epoch 00168: val_loss did not improve from 1.20559

Epoch 00169: val_loss did not improve from 1.20559

Epoch 00170: val_loss did not improve from 1.20559

Epoch 00171: val_loss did not improve from 1.20559

Epoch 00172: val_loss did not improve from 1.20559

Epoch 00173: val_loss did not improve from 1.20559

Epoch 00174: val_loss did not improve from 1.20559

Epoch 00174: ReduceLROnPlateau reducing learning rate to 0.0006400000303983689.

Epoch 00175: val_loss did not improve from 1.20559

Epoch 00176: val_loss did not improve from 1.20559

Epoch 00177: val_loss did not improve from 1.20559

Epoch 00178: val_loss did not improve from 1.20559

Epoch 00179: val_loss did not improve from 1.20559

Epoch 00180: val_loss did not improve from 1.20559

Epoch 00181: val_loss did not improve from 1.20559

Epoch 00182: val_loss did not improve from 1.20559

Epoch 00183: val_loss did not improve from 1.20559

Epoch 00184: val_loss did not improve from 1.20559

Epoch 00184: ReduceLROnPlateau reducing learning rate to 0.0005120000336319208.

Epoch 00185: val_loss did not improve from 1.20559

Epoch 00186: val_loss did not improve from 1.20559

Epoch 00187: val_loss did not improve from 1.20559

Epoch 00188: val_loss did not improve from 1.20559

Epoch 00189: val_loss did not improve from 1.20559

Epoch 00190: val_loss did not improve from 1.20559

Epoch 00191: val_loss did not improve from 1.20559

Epoch 00192: val_loss did not improve from 1.20559

Epoch 00193: val_loss did not improve from 1.20559

Epoch 00194: val_loss did not improve from 1.20559

Epoch 00194: ReduceLROnPlateau reducing learning rate to 0.00040960004553198815.

Epoch 00195: val_loss did not improve from 1.20559

Epoch 00196: val_loss improved from 1.20559 to 1.17393, saving model to weights.best.gray_model.hdf5

Epoch 00197: val_loss did not improve from 1.17393

Epoch 00198: val_loss did not improve from 1.17393

Epoch 00199: val_loss did not improve from 1.17393

Epoch 00200: val_loss did not improve from 1.17393
In [66]:
hide_code
# Plot the training history
history_plot(gray_history, 0)

Try to reach an accuracy greater than 50%

In [67]:
hide_code
# Load the model with the best validation accuracy
gray_model.load_weights('weights.best.gray_model.hdf5')
# Calculate classification accuracy on the testing set
gray_score = gray_model.evaluate(x_test2, y_test2)
gray_score
165/165 [==============================] - 0s 1ms/step
Out[67]:
[1.081701368635351, 0.6909090912703312]

Apply the ImageDataGenerator() function.

In [68]:
hide_code
# Fit the model with ImageDataGenerator()
steps, epochs = 1000, 10
data_generator = ImageDataGenerator(zoom_range=0.2, shear_range=0.2, rotation_range=20)

gray_generator = gray_model.fit_generator(data_generator.flow(x_train2, y_train2, batch_size=64),
                                          steps_per_epoch = steps, epochs = epochs,
                                          validation_data = (x_valid2, y_valid2), 
                                          callbacks=[gray_checkpointer, gray_lr_reduction], verbose=2)
Epoch 1/10
 - 265s - loss: 1.2616 - acc: 0.6257 - val_loss: 1.1346 - val_acc: 0.6485

Epoch 00001: val_loss improved from 1.17393 to 1.13462, saving model to weights.best.gray_model.hdf5
Epoch 2/10
 - 276s - loss: 1.0199 - acc: 0.6866 - val_loss: 0.9844 - val_acc: 0.7091

Epoch 00002: val_loss improved from 1.13462 to 0.98438, saving model to weights.best.gray_model.hdf5
Epoch 3/10
 - 257s - loss: 0.8821 - acc: 0.7256 - val_loss: 0.9625 - val_acc: 0.7030

Epoch 00003: val_loss improved from 0.98438 to 0.96252, saving model to weights.best.gray_model.hdf5
Epoch 4/10
 - 278s - loss: 0.7678 - acc: 0.7588 - val_loss: 0.9479 - val_acc: 0.7273

Epoch 00004: val_loss improved from 0.96252 to 0.94789, saving model to weights.best.gray_model.hdf5
Epoch 5/10
 - 292s - loss: 0.6857 - acc: 0.7822 - val_loss: 1.0155 - val_acc: 0.7212

Epoch 00005: val_loss did not improve from 0.94789
Epoch 6/10
 - 275s - loss: 0.6113 - acc: 0.8052 - val_loss: 0.8892 - val_acc: 0.7333

Epoch 00006: val_loss improved from 0.94789 to 0.88923, saving model to weights.best.gray_model.hdf5
Epoch 7/10
 - 286s - loss: 0.5583 - acc: 0.8222 - val_loss: 0.9671 - val_acc: 0.7394

Epoch 00007: val_loss did not improve from 0.88923
Epoch 8/10
 - 285s - loss: 0.4997 - acc: 0.8396 - val_loss: 0.9933 - val_acc: 0.7212

Epoch 00008: val_loss did not improve from 0.88923
Epoch 9/10
 - 291s - loss: 0.4645 - acc: 0.8530 - val_loss: 1.0517 - val_acc: 0.6970

Epoch 00009: val_loss did not improve from 0.88923
Epoch 10/10
 - 289s - loss: 0.4217 - acc: 0.8659 - val_loss: 0.9220 - val_acc: 0.7273

Epoch 00010: val_loss did not improve from 0.88923
In [107]:
hide_code
# Load the model with the best validation accuracy
gray_model.load_weights('weights.best.gray_model.hdf5')
# Calculate classification accuracy on the testing set
gray_score = gray_model.evaluate(x_test2, y_test2)
gray_score
165/165 [==============================] - 3s 17ms/step
Out[107]:
[0.6657397743427392, 0.7878787882400281]

Let's compare the results with classifying algorithms.

In [80]:
hide_code
# Fit the classifier and get the accuracy score
y_train2_c = np.array([np.argmax(y) for y in y_train2])
y_test2_c = np.array([np.argmax(y) for y in y_test2])
clf = GradientBoostingClassifier().fit(x_train2.reshape(-1, 32*32), y_train2_c)
clf.score(x_test2.reshape(-1, 32*32), y_test2_c)
Out[80]:
0.081818181818181818
In [81]:
hide_code
# Fit the classifier and get the accuracy score
clf2 = RandomForestClassifier().fit(x_train2.reshape(-1, 32*32), y_train2_c)
clf2.score(x_test2.reshape(-1, 32*32), y_test2_c)
Out[81]:
0.087878787878787876

Step 5. Create a Multi-Label Classification Model

Color Images

In [116]:
hide_code
def multi_model():    
    model_input = Input(shape=(32, 32, 3))
    x = BatchNormalization()(model_input)
    
    # TODO: Define a model architecture
    x = Conv2D(32, (3, 3), padding='same')(model_input)
    x = LeakyReLU(alpha=0.02)(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)    
    x = Dropout(0.25)(x)
    
    x = Conv2D(128, (3, 3), padding='same')(x)
    x = LeakyReLU(alpha=0.02)(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)    
    x = Dropout(0.25)(x)
              
    x = GlobalMaxPooling2D()(x)
    
    x = Dense(1024)(x)
    x = LeakyReLU(alpha=0.02)(x)
    x = Dropout(0.25)(x)
    
    x = Dense(128)(x)  
    x = LeakyReLU(alpha=0.02)(x)
    x = Dropout(0.25)(x)
    
    y1 = Dense(33, activation='softmax')(x)
    y2 = Dense(2, activation='softmax')(x)
    
    model = Model(inputs=model_input, outputs=[y1, y2])
    
    # TODO: Compile the model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

multi_model = multi_model()
In [117]:
hide_code
# Display the model architecture
multi_model.summary()
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_11 (InputLayer)           (None, 32, 32, 3)    0                                            
__________________________________________________________________________________________________
conv2d_39 (Conv2D)              (None, 32, 32, 32)   896         input_11[0][0]                   
__________________________________________________________________________________________________
leaky_re_lu_53 (LeakyReLU)      (None, 32, 32, 32)   0           conv2d_39[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_39 (MaxPooling2D) (None, 16, 16, 32)   0           leaky_re_lu_53[0][0]             
__________________________________________________________________________________________________
dropout_73 (Dropout)            (None, 16, 16, 32)   0           max_pooling2d_39[0][0]           
__________________________________________________________________________________________________
conv2d_40 (Conv2D)              (None, 16, 16, 128)  36992       dropout_73[0][0]                 
__________________________________________________________________________________________________
leaky_re_lu_54 (LeakyReLU)      (None, 16, 16, 128)  0           conv2d_40[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_40 (MaxPooling2D) (None, 8, 8, 128)    0           leaky_re_lu_54[0][0]             
__________________________________________________________________________________________________
dropout_74 (Dropout)            (None, 8, 8, 128)    0           max_pooling2d_40[0][0]           
__________________________________________________________________________________________________
global_max_pooling2d_20 (Global (None, 128)          0           dropout_74[0][0]                 
__________________________________________________________________________________________________
dense_64 (Dense)                (None, 1024)         132096      global_max_pooling2d_20[0][0]    
__________________________________________________________________________________________________
leaky_re_lu_55 (LeakyReLU)      (None, 1024)         0           dense_64[0][0]                   
__________________________________________________________________________________________________
dropout_75 (Dropout)            (None, 1024)         0           leaky_re_lu_55[0][0]             
__________________________________________________________________________________________________
dense_65 (Dense)                (None, 128)          131200      dropout_75[0][0]                 
__________________________________________________________________________________________________
leaky_re_lu_56 (LeakyReLU)      (None, 128)          0           dense_65[0][0]                   
__________________________________________________________________________________________________
dropout_76 (Dropout)            (None, 128)          0           leaky_re_lu_56[0][0]             
__________________________________________________________________________________________________
dense_66 (Dense)                (None, 33)           4257        dropout_76[0][0]                 
__________________________________________________________________________________________________
dense_67 (Dense)                (None, 2)            258         dropout_76[0][0]                 
==================================================================================================
Total params: 305,699
Trainable params: 305,699
Non-trainable params: 0
__________________________________________________________________________________________________
In [118]:
hide_code
# Create callbacks
multi_checkpointer = ModelCheckpoint(filepath='weights.best.multi.hdf5', 
                                     verbose=2, save_best_only=True)
multi_lr_reduction = ReduceLROnPlateau(monitor='val_loss', 
                                       patience=5, verbose=2, factor=0.8)
In [119]:
hide_code
# Train the model
multi_history = multi_model.fit(x_train3, y_train3_list, 
                                validation_data=(x_valid3, y_valid3_list), 
                                epochs=100, batch_size=64, verbose=0, 
                                callbacks=[multi_checkpointer, multi_lr_reduction])
Epoch 00001: val_loss improved from inf to 4.19048, saving model to weights.best.multi.hdf5

Epoch 00002: val_loss improved from 4.19048 to 4.18492, saving model to weights.best.multi.hdf5

Epoch 00003: val_loss improved from 4.18492 to 4.18052, saving model to weights.best.multi.hdf5

Epoch 00004: val_loss improved from 4.18052 to 4.17439, saving model to weights.best.multi.hdf5

Epoch 00005: val_loss improved from 4.17439 to 4.10641, saving model to weights.best.multi.hdf5

Epoch 00006: val_loss improved from 4.10641 to 4.06883, saving model to weights.best.multi.hdf5

Epoch 00007: val_loss improved from 4.06883 to 4.06882, saving model to weights.best.multi.hdf5

Epoch 00008: val_loss improved from 4.06882 to 4.00557, saving model to weights.best.multi.hdf5

Epoch 00009: val_loss improved from 4.00557 to 3.98756, saving model to weights.best.multi.hdf5

Epoch 00010: val_loss improved from 3.98756 to 3.96089, saving model to weights.best.multi.hdf5

Epoch 00011: val_loss did not improve from 3.96089

Epoch 00012: val_loss did not improve from 3.96089

Epoch 00013: val_loss improved from 3.96089 to 3.93254, saving model to weights.best.multi.hdf5

Epoch 00014: val_loss improved from 3.93254 to 3.91957, saving model to weights.best.multi.hdf5

Epoch 00015: val_loss improved from 3.91957 to 3.88585, saving model to weights.best.multi.hdf5

Epoch 00016: val_loss did not improve from 3.88585

Epoch 00017: val_loss did not improve from 3.88585

Epoch 00018: val_loss did not improve from 3.88585

Epoch 00019: val_loss did not improve from 3.88585

Epoch 00020: val_loss improved from 3.88585 to 3.84338, saving model to weights.best.multi.hdf5

Epoch 00021: val_loss did not improve from 3.84338

Epoch 00022: val_loss improved from 3.84338 to 3.82265, saving model to weights.best.multi.hdf5

Epoch 00023: val_loss did not improve from 3.82265

Epoch 00024: val_loss did not improve from 3.82265

Epoch 00025: val_loss did not improve from 3.82265

Epoch 00026: val_loss did not improve from 3.82265

Epoch 00027: val_loss did not improve from 3.82265

Epoch 00027: ReduceLROnPlateau reducing learning rate to 0.000800000037997961.

Epoch 00028: val_loss did not improve from 3.82265

Epoch 00029: val_loss improved from 3.82265 to 3.71624, saving model to weights.best.multi.hdf5

Epoch 00030: val_loss improved from 3.71624 to 3.70138, saving model to weights.best.multi.hdf5

Epoch 00031: val_loss did not improve from 3.70138

Epoch 00032: val_loss improved from 3.70138 to 3.67556, saving model to weights.best.multi.hdf5

Epoch 00033: val_loss did not improve from 3.67556

Epoch 00034: val_loss did not improve from 3.67556

Epoch 00035: val_loss did not improve from 3.67556

Epoch 00036: val_loss did not improve from 3.67556

Epoch 00037: val_loss improved from 3.67556 to 3.57982, saving model to weights.best.multi.hdf5

Epoch 00038: val_loss improved from 3.57982 to 3.53694, saving model to weights.best.multi.hdf5

Epoch 00039: val_loss did not improve from 3.53694

Epoch 00040: val_loss did not improve from 3.53694

Epoch 00041: val_loss did not improve from 3.53694

Epoch 00042: val_loss improved from 3.53694 to 3.43455, saving model to weights.best.multi.hdf5

Epoch 00043: val_loss did not improve from 3.43455

Epoch 00044: val_loss did not improve from 3.43455

Epoch 00045: val_loss did not improve from 3.43455

Epoch 00046: val_loss did not improve from 3.43455

Epoch 00047: val_loss did not improve from 3.43455

Epoch 00047: ReduceLROnPlateau reducing learning rate to 0.0006400000303983689.

Epoch 00048: val_loss did not improve from 3.43455

Epoch 00049: val_loss did not improve from 3.43455

Epoch 00050: val_loss did not improve from 3.43455

Epoch 00051: val_loss did not improve from 3.43455

Epoch 00052: val_loss did not improve from 3.43455

Epoch 00052: ReduceLROnPlateau reducing learning rate to 0.0005120000336319208.

Epoch 00053: val_loss did not improve from 3.43455

Epoch 00054: val_loss did not improve from 3.43455

Epoch 00055: val_loss did not improve from 3.43455

Epoch 00056: val_loss did not improve from 3.43455

Epoch 00057: val_loss improved from 3.43455 to 3.40873, saving model to weights.best.multi.hdf5

Epoch 00058: val_loss did not improve from 3.40873

Epoch 00059: val_loss improved from 3.40873 to 3.32220, saving model to weights.best.multi.hdf5

Epoch 00060: val_loss did not improve from 3.32220

Epoch 00061: val_loss did not improve from 3.32220

Epoch 00062: val_loss did not improve from 3.32220

Epoch 00063: val_loss did not improve from 3.32220

Epoch 00064: val_loss did not improve from 3.32220

Epoch 00064: ReduceLROnPlateau reducing learning rate to 0.00040960004553198815.

Epoch 00065: val_loss did not improve from 3.32220

Epoch 00066: val_loss did not improve from 3.32220

Epoch 00067: val_loss did not improve from 3.32220

Epoch 00068: val_loss did not improve from 3.32220

Epoch 00069: val_loss improved from 3.32220 to 3.30941, saving model to weights.best.multi.hdf5

Epoch 00070: val_loss did not improve from 3.30941

Epoch 00071: val_loss did not improve from 3.30941

Epoch 00072: val_loss did not improve from 3.30941

Epoch 00073: val_loss did not improve from 3.30941

Epoch 00074: val_loss did not improve from 3.30941

Epoch 00074: ReduceLROnPlateau reducing learning rate to 0.00032768002711236477.

Epoch 00075: val_loss did not improve from 3.30941

Epoch 00076: val_loss did not improve from 3.30941

Epoch 00077: val_loss did not improve from 3.30941

Epoch 00078: val_loss did not improve from 3.30941

Epoch 00079: val_loss did not improve from 3.30941

Epoch 00079: ReduceLROnPlateau reducing learning rate to 0.0002621440216898918.

Epoch 00080: val_loss did not improve from 3.30941

Epoch 00081: val_loss did not improve from 3.30941

Epoch 00082: val_loss did not improve from 3.30941

Epoch 00083: val_loss did not improve from 3.30941

Epoch 00084: val_loss did not improve from 3.30941

Epoch 00084: ReduceLROnPlateau reducing learning rate to 0.00020971521735191345.

Epoch 00085: val_loss did not improve from 3.30941

Epoch 00086: val_loss did not improve from 3.30941

Epoch 00087: val_loss did not improve from 3.30941

Epoch 00088: val_loss did not improve from 3.30941

Epoch 00089: val_loss did not improve from 3.30941

Epoch 00089: ReduceLROnPlateau reducing learning rate to 0.00016777217388153076.

Epoch 00090: val_loss did not improve from 3.30941

Epoch 00091: val_loss did not improve from 3.30941

Epoch 00092: val_loss did not improve from 3.30941

Epoch 00093: val_loss did not improve from 3.30941

Epoch 00094: val_loss did not improve from 3.30941

Epoch 00094: ReduceLROnPlateau reducing learning rate to 0.00013421773910522462.

Epoch 00095: val_loss did not improve from 3.30941

Epoch 00096: val_loss did not improve from 3.30941

Epoch 00097: val_loss did not improve from 3.30941

Epoch 00098: val_loss did not improve from 3.30941

Epoch 00099: val_loss did not improve from 3.30941

Epoch 00099: ReduceLROnPlateau reducing learning rate to 0.00010737419361248613.

Epoch 00100: val_loss did not improve from 3.30941

We should have an accuracy greater than 3% for the first target (letter) and greater than 50% for the second target (background).

In [120]:
hide_code
# Load the model with the best validation accuracy
multi_model.load_weights('weights.best.multi.hdf5')
# Calculate classification accuracy on the testing set
multi_scores = multi_model.evaluate(x_test3, y_test3_list, verbose=0)

print("Scores: \n" , (multi_scores))
print("First label. Accuracy: %.2f%%" % (multi_scores[3]*100))
print("Second label. Accuracy: %.2f%%" % (multi_scores[4]*100))
Scores: 
 [4.226035048744896, 2.9441545125209925, 1.2818806554331923, 0.07272727272727272, 0.5272727276339676]
First label. Accuracy: 7.27%
Second label. Accuracy: 52.73%

Grayscaled Images

In [107]:
hide_code
def gray_multi_model():    
    model_input = Input(shape=(32, 32, 1))
    x = BatchNormalization()(model_input)
    
    # TODO: Define a model architecture
    x = Conv2D(32, (5, 5), padding='same')(model_input)
    x = LeakyReLU(alpha=0.02)(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)    
    x = Dropout(0.25)(x)
    
    x = Conv2D(256, (5, 5), padding='same')(x)
    x = LeakyReLU(alpha=0.02)(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)    
    x = Dropout(0.25)(x)
              
    x = GlobalMaxPooling2D()(x)
    
    x = Dense(1024)(x)
    x = LeakyReLU(alpha=0.02)(x)
    x = Dropout(0.25)(x)
    
    x = Dense(256)(x)
    x = LeakyReLU(alpha=0.02)(x)
    x = Dropout(0.25)(x)
    
    y1 = Dense(33, activation='softmax')(x)
    y2 = Dense(2, activation='softmax')(x)
    
    model = Model(inputs=model_input, outputs=[y1, y2])
    
    # TODO: Compile the model    
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model

gray_multi_model = gray_multi_model()
In [108]:
hide_code
# Display the model architecture
gray_multi_model.summary()
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_9 (InputLayer)            (None, 32, 32, 1)    0                                            
__________________________________________________________________________________________________
conv2d_35 (Conv2D)              (None, 32, 32, 32)   832         input_9[0][0]                    
__________________________________________________________________________________________________
leaky_re_lu_45 (LeakyReLU)      (None, 32, 32, 32)   0           conv2d_35[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_35 (MaxPooling2D) (None, 16, 16, 32)   0           leaky_re_lu_45[0][0]             
__________________________________________________________________________________________________
dropout_65 (Dropout)            (None, 16, 16, 32)   0           max_pooling2d_35[0][0]           
__________________________________________________________________________________________________
conv2d_36 (Conv2D)              (None, 16, 16, 256)  205056      dropout_65[0][0]                 
__________________________________________________________________________________________________
leaky_re_lu_46 (LeakyReLU)      (None, 16, 16, 256)  0           conv2d_36[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_36 (MaxPooling2D) (None, 8, 8, 256)    0           leaky_re_lu_46[0][0]             
__________________________________________________________________________________________________
dropout_66 (Dropout)            (None, 8, 8, 256)    0           max_pooling2d_36[0][0]           
__________________________________________________________________________________________________
global_max_pooling2d_18 (Global (None, 256)          0           dropout_66[0][0]                 
__________________________________________________________________________________________________
dense_56 (Dense)                (None, 1024)         263168      global_max_pooling2d_18[0][0]    
__________________________________________________________________________________________________
leaky_re_lu_47 (LeakyReLU)      (None, 1024)         0           dense_56[0][0]                   
__________________________________________________________________________________________________
dropout_67 (Dropout)            (None, 1024)         0           leaky_re_lu_47[0][0]             
__________________________________________________________________________________________________
dense_57 (Dense)                (None, 256)          262400      dropout_67[0][0]                 
__________________________________________________________________________________________________
leaky_re_lu_48 (LeakyReLU)      (None, 256)          0           dense_57[0][0]                   
__________________________________________________________________________________________________
dropout_68 (Dropout)            (None, 256)          0           leaky_re_lu_48[0][0]             
__________________________________________________________________________________________________
dense_58 (Dense)                (None, 33)           8481        dropout_68[0][0]                 
__________________________________________________________________________________________________
dense_59 (Dense)                (None, 2)            514         dropout_68[0][0]                 
==================================================================================================
Total params: 740,451
Trainable params: 740,451
Non-trainable params: 0
__________________________________________________________________________________________________
In [109]:
hide_code
# Create callbacks
gray_multi_checkpointer = ModelCheckpoint(filepath='weights.best.gray_multi.hdf5', 
                                          verbose=2, save_best_only=True)
hide_code
# Display the model architecture
gray_multi_lr_reduction = ReduceLROnPlateau(monitor='val_loss', 
                                            patience=10, verbose=2, factor=0.8)
In [110]:
hide_code
# Train the model
gray_multi_history = gray_multi_model.fit(x_train4, y_train4_list, 
                                          validation_data=(x_valid4, y_valid4_list), 
                                          epochs=100, batch_size=64, verbose=0, 
                                          callbacks=[gray_multi_checkpointer, gray_multi_lr_reduction])
Epoch 00001: val_loss improved from inf to 4.19105, saving model to weights.best.gray_multi.hdf5

Epoch 00002: val_loss improved from 4.19105 to 4.18717, saving model to weights.best.gray_multi.hdf5

Epoch 00003: val_loss did not improve from 4.18717

Epoch 00004: val_loss did not improve from 4.18717

Epoch 00005: val_loss improved from 4.18717 to 4.18072, saving model to weights.best.gray_multi.hdf5

Epoch 00006: val_loss did not improve from 4.18072

Epoch 00007: val_loss improved from 4.18072 to 4.18071, saving model to weights.best.gray_multi.hdf5

Epoch 00008: val_loss improved from 4.18071 to 4.16065, saving model to weights.best.gray_multi.hdf5

Epoch 00009: val_loss did not improve from 4.16065

Epoch 00010: val_loss improved from 4.16065 to 4.11665, saving model to weights.best.gray_multi.hdf5

Epoch 00011: val_loss improved from 4.11665 to 4.10661, saving model to weights.best.gray_multi.hdf5

Epoch 00012: val_loss improved from 4.10661 to 4.10513, saving model to weights.best.gray_multi.hdf5

Epoch 00013: val_loss did not improve from 4.10513

Epoch 00014: val_loss did not improve from 4.10513

Epoch 00015: val_loss did not improve from 4.10513

Epoch 00016: val_loss did not improve from 4.10513

Epoch 00017: val_loss improved from 4.10513 to 4.09313, saving model to weights.best.gray_multi.hdf5

Epoch 00018: val_loss improved from 4.09313 to 4.08411, saving model to weights.best.gray_multi.hdf5

Epoch 00019: val_loss did not improve from 4.08411

Epoch 00020: val_loss did not improve from 4.08411

Epoch 00021: val_loss did not improve from 4.08411

Epoch 00022: val_loss did not improve from 4.08411

Epoch 00023: val_loss did not improve from 4.08411

Epoch 00024: val_loss did not improve from 4.08411

Epoch 00025: val_loss did not improve from 4.08411

Epoch 00026: val_loss improved from 4.08411 to 4.06733, saving model to weights.best.gray_multi.hdf5

Epoch 00027: val_loss did not improve from 4.06733

Epoch 00028: val_loss did not improve from 4.06733

Epoch 00029: val_loss did not improve from 4.06733

Epoch 00030: val_loss did not improve from 4.06733

Epoch 00031: val_loss did not improve from 4.06733

Epoch 00032: val_loss did not improve from 4.06733

Epoch 00033: val_loss did not improve from 4.06733

Epoch 00034: val_loss did not improve from 4.06733

Epoch 00035: val_loss did not improve from 4.06733

Epoch 00036: val_loss did not improve from 4.06733

Epoch 00036: ReduceLROnPlateau reducing learning rate to 0.000800000037997961.

Epoch 00037: val_loss did not improve from 4.06733

Epoch 00038: val_loss did not improve from 4.06733

Epoch 00039: val_loss did not improve from 4.06733

Epoch 00040: val_loss did not improve from 4.06733

Epoch 00041: val_loss did not improve from 4.06733

Epoch 00042: val_loss did not improve from 4.06733

Epoch 00043: val_loss improved from 4.06733 to 4.06624, saving model to weights.best.gray_multi.hdf5

Epoch 00044: val_loss did not improve from 4.06624

Epoch 00045: val_loss did not improve from 4.06624

Epoch 00046: val_loss did not improve from 4.06624

Epoch 00047: val_loss did not improve from 4.06624

Epoch 00048: val_loss did not improve from 4.06624

Epoch 00049: val_loss did not improve from 4.06624

Epoch 00050: val_loss did not improve from 4.06624

Epoch 00051: val_loss did not improve from 4.06624

Epoch 00052: val_loss improved from 4.06624 to 4.05134, saving model to weights.best.gray_multi.hdf5

Epoch 00053: val_loss did not improve from 4.05134

Epoch 00054: val_loss did not improve from 4.05134

Epoch 00055: val_loss did not improve from 4.05134

Epoch 00056: val_loss did not improve from 4.05134

Epoch 00057: val_loss did not improve from 4.05134

Epoch 00058: val_loss did not improve from 4.05134

Epoch 00059: val_loss did not improve from 4.05134

Epoch 00060: val_loss did not improve from 4.05134

Epoch 00061: val_loss did not improve from 4.05134

Epoch 00062: val_loss did not improve from 4.05134

Epoch 00062: ReduceLROnPlateau reducing learning rate to 0.0006400000303983689.

Epoch 00063: val_loss improved from 4.05134 to 4.05088, saving model to weights.best.gray_multi.hdf5

Epoch 00064: val_loss did not improve from 4.05088

Epoch 00065: val_loss did not improve from 4.05088

Epoch 00066: val_loss improved from 4.05088 to 4.04149, saving model to weights.best.gray_multi.hdf5

Epoch 00067: val_loss did not improve from 4.04149

Epoch 00068: val_loss did not improve from 4.04149

Epoch 00069: val_loss did not improve from 4.04149

Epoch 00070: val_loss did not improve from 4.04149

Epoch 00071: val_loss did not improve from 4.04149

Epoch 00072: val_loss improved from 4.04149 to 4.00452, saving model to weights.best.gray_multi.hdf5

Epoch 00073: val_loss did not improve from 4.00452

Epoch 00074: val_loss did not improve from 4.00452

Epoch 00075: val_loss did not improve from 4.00452

Epoch 00076: val_loss did not improve from 4.00452

Epoch 00077: val_loss improved from 4.00452 to 4.00124, saving model to weights.best.gray_multi.hdf5

Epoch 00078: val_loss did not improve from 4.00124

Epoch 00079: val_loss improved from 4.00124 to 3.98924, saving model to weights.best.gray_multi.hdf5

Epoch 00080: val_loss did not improve from 3.98924

Epoch 00081: val_loss did not improve from 3.98924

Epoch 00082: val_loss did not improve from 3.98924

Epoch 00083: val_loss did not improve from 3.98924

Epoch 00084: val_loss improved from 3.98924 to 3.97357, saving model to weights.best.gray_multi.hdf5

Epoch 00085: val_loss did not improve from 3.97357

Epoch 00086: val_loss did not improve from 3.97357

Epoch 00087: val_loss improved from 3.97357 to 3.89358, saving model to weights.best.gray_multi.hdf5

Epoch 00088: val_loss did not improve from 3.89358

Epoch 00089: val_loss did not improve from 3.89358

Epoch 00090: val_loss did not improve from 3.89358

Epoch 00091: val_loss did not improve from 3.89358

Epoch 00092: val_loss did not improve from 3.89358

Epoch 00093: val_loss improved from 3.89358 to 3.86942, saving model to weights.best.gray_multi.hdf5

Epoch 00094: val_loss did not improve from 3.86942

Epoch 00095: val_loss improved from 3.86942 to 3.79982, saving model to weights.best.gray_multi.hdf5

Epoch 00096: val_loss did not improve from 3.79982

Epoch 00097: val_loss improved from 3.79982 to 3.74953, saving model to weights.best.gray_multi.hdf5

Epoch 00098: val_loss did not improve from 3.74953

Epoch 00099: val_loss improved from 3.74953 to 3.73606, saving model to weights.best.gray_multi.hdf5

Epoch 00100: val_loss did not improve from 3.73606

We should have an accuracy greater than 3% for the first target (letter) and greater than 50% for the second target (background).

In [111]:
hide_code
# Load the model with the best validation accuracy
gray_multi_model.load_weights('weights.best.gray_multi.hdf5')
# Calculate classification accuracy on the testing set
gray_multi_scores = gray_multi_model.evaluate(x_test4, y_test4_list, verbose=0)

print("Scores: \n" , (gray_multi_scores))
print("First label. Accuracy: %.2f%%" % (gray_multi_scores[3]*100))
print("Second label. Accuracy: %.2f%%" % (gray_multi_scores[4]*100))
Scores: 
 [3.863917894074411, 3.276771663896965, 0.5871462684689146, 0.11515151515151516, 0.7636363639976039]
First label. Accuracy: 11.52%
Second label. Accuracy: 76.36%

Step 6. Keras Applications

Choose one of the keras applications and try to reach an accuracy greater than 30%

Color Images

In [39]:
hide_code
# Create bottleneck features
resize_x_train = np.array([scipy.misc.imresize(x_train[i], (139, 139, 3)) 
                           for i in range(0, len(x_train))]).astype('float32')
resize_x_valid = np.array([scipy.misc.imresize(x_valid[i], (139, 139, 3)) 
                           for i in range(0, len(x_valid))]).astype('float32')
resize_x_test = np.array([scipy.misc.imresize(x_test[i], (139, 139, 3)) 
                          for i in range(0, len(x_test))]).astype('float32')

iv3_x_train = preprocess_input(resize_x_train)
iv3_x_valid = preprocess_input(resize_x_valid)
iv3_x_test = preprocess_input(resize_x_test)

iv3_base_model = InceptionV3(weights='imagenet', include_top=False)
x_train_bn = iv3_base_model.predict(iv3_x_train)
x_valid_bn = iv3_base_model.predict(iv3_x_valid)
x_test_bn = iv3_base_model.predict(iv3_x_test)
In [40]:
hide_code
# Save bottleneck features
x_train_bn = np.squeeze(x_train_bn)
x_valid_bn = np.squeeze(x_valid_bn)
x_test_bn = np.squeeze(x_test_bn)

np.save('x_train_bn.npy', x_train_bn)
np.save('x_valid_bn.npy', x_valid_bn)
np.save('x_test_bn.npy', x_test_bn)
In [41]:
hide_code
# Load bottleneck features
x_train_bn = np.load('x_train_bn.npy')
x_valid_bn = np.load('x_valid_bn.npy')
x_test_bn = np.load('x_test_bn.npy')
In [86]:
hide_code
def iv3_model():
    model = Sequential()
    # TODO: Define a model architecture
    model.add(Conv2D(filters=32, kernel_size=2, input_shape=x_train_bn.shape[1:]))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.25))
    
    model.add(Dense(2048))
    model.add(LeakyReLU(alpha=0.02))
    model.add(Dropout(0.25))
        
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.02))
    model.add(Dropout(0.2))
    
    model.add(Dense(33, activation='softmax'))
    # TODO: Compile the model     
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model

iv3_model = iv3_model()
In [87]:
hide_code
# Create callbacks
iv3_checkpointer = ModelCheckpoint(filepath='weights.best.iv3.hdf5', 
                                     verbose=2, save_best_only=True)

iv3_lr_reduction = ReduceLROnPlateau(monitor='val_loss', 
                                     patience=10, verbose=2, factor=0.8)
In [88]:
hide_code
# Fit the model 
iv3_history = iv3_model.fit(x_train_bn, y_train, 
                            validation_data=(x_valid_bn, y_valid),
                            epochs=50, batch_size=64, 
                            callbacks=[iv3_checkpointer, iv3_lr_reduction], verbose=0);
Epoch 00001: val_loss improved from inf to 4.99152, saving model to weights.best.iv3.hdf5

Epoch 00002: val_loss improved from 4.99152 to 4.28398, saving model to weights.best.iv3.hdf5

Epoch 00003: val_loss improved from 4.28398 to 3.02228, saving model to weights.best.iv3.hdf5

Epoch 00004: val_loss did not improve from 3.02228

Epoch 00005: val_loss improved from 3.02228 to 2.88995, saving model to weights.best.iv3.hdf5

Epoch 00006: val_loss improved from 2.88995 to 2.67655, saving model to weights.best.iv3.hdf5

Epoch 00007: val_loss did not improve from 2.67655

Epoch 00008: val_loss improved from 2.67655 to 2.66847, saving model to weights.best.iv3.hdf5

Epoch 00009: val_loss improved from 2.66847 to 2.48350, saving model to weights.best.iv3.hdf5

Epoch 00010: val_loss improved from 2.48350 to 2.41688, saving model to weights.best.iv3.hdf5

Epoch 00011: val_loss did not improve from 2.41688

Epoch 00012: val_loss improved from 2.41688 to 2.39822, saving model to weights.best.iv3.hdf5

Epoch 00013: val_loss improved from 2.39822 to 2.35037, saving model to weights.best.iv3.hdf5

Epoch 00014: val_loss improved from 2.35037 to 2.22754, saving model to weights.best.iv3.hdf5

Epoch 00015: val_loss did not improve from 2.22754

Epoch 00016: val_loss did not improve from 2.22754

Epoch 00017: val_loss did not improve from 2.22754

Epoch 00018: val_loss did not improve from 2.22754

Epoch 00019: val_loss did not improve from 2.22754

Epoch 00020: val_loss improved from 2.22754 to 2.15635, saving model to weights.best.iv3.hdf5

Epoch 00021: val_loss did not improve from 2.15635

Epoch 00022: val_loss did not improve from 2.15635

Epoch 00023: val_loss did not improve from 2.15635

Epoch 00024: val_loss did not improve from 2.15635

Epoch 00025: val_loss did not improve from 2.15635

Epoch 00026: val_loss did not improve from 2.15635

Epoch 00027: val_loss did not improve from 2.15635

Epoch 00028: val_loss did not improve from 2.15635

Epoch 00029: val_loss did not improve from 2.15635

Epoch 00030: val_loss did not improve from 2.15635

Epoch 00030: ReduceLROnPlateau reducing learning rate to 0.001600000075995922.

Epoch 00031: val_loss did not improve from 2.15635

Epoch 00032: val_loss did not improve from 2.15635

Epoch 00033: val_loss did not improve from 2.15635

Epoch 00034: val_loss did not improve from 2.15635

Epoch 00035: val_loss did not improve from 2.15635

Epoch 00036: val_loss did not improve from 2.15635

Epoch 00037: val_loss did not improve from 2.15635

Epoch 00038: val_loss did not improve from 2.15635

Epoch 00039: val_loss did not improve from 2.15635

Epoch 00040: val_loss did not improve from 2.15635

Epoch 00040: ReduceLROnPlateau reducing learning rate to 0.0012800000607967378.

Epoch 00041: val_loss did not improve from 2.15635

Epoch 00042: val_loss did not improve from 2.15635

Epoch 00043: val_loss did not improve from 2.15635

Epoch 00044: val_loss did not improve from 2.15635

Epoch 00045: val_loss did not improve from 2.15635

Epoch 00046: val_loss did not improve from 2.15635

Epoch 00047: val_loss did not improve from 2.15635

Epoch 00048: val_loss did not improve from 2.15635

Epoch 00049: val_loss did not improve from 2.15635

Epoch 00050: val_loss did not improve from 2.15635

Epoch 00050: ReduceLROnPlateau reducing learning rate to 0.0010240000672638416.
In [89]:
hide_code
# Plot the training history
history_plot(iv3_history, 0)
In [90]:
hide_code
# Load the model with the best validation accuracy
iv3_model.load_weights('weights.best.iv3.hdf5')
# Calculate classification accuracy on the testing set
iv3_scores = iv3_model.evaluate(x_test_bn, y_test)
print("Accuracy: %.2f%%" % (iv3_scores[1]*100))
iv3_scores
165/165 [==============================] - 0s 405us/step
Accuracy: 34.55%
Out[90]:
[2.110304995739099, 0.3454545456351656]

Grayscaled Images

In [91]:
hide_code
# Create bottleneck features
resize_x_train2 = np.array([scipy.misc.imresize(x_train2_color[i], (139, 139, 3)) 
                            for i in range(0, len(x_train2_color))]).astype('float32')
resize_x_valid2 = np.array([scipy.misc.imresize(x_valid2_color[i], (139, 139, 3)) 
                            for i in range(0, len(x_valid2_color))]).astype('float32')
resize_x_test2 = np.array([scipy.misc.imresize(x_test2_color[i], (139, 139, 3)) 
                           for i in range(0, len(x_test2_color))]).astype('float32')

iv3_x_train2 = preprocess_input(resize_x_train2)
iv3_x_valid2 = preprocess_input(resize_x_valid2)
iv3_x_test2 = preprocess_input(resize_x_test2)

iv3_base_model2 = InceptionV3(weights='imagenet', include_top=False)
x_train_bn2 = iv3_base_model2.predict(iv3_x_train2)
x_valid_bn2 = iv3_base_model2.predict(iv3_x_valid2)
x_test_bn2 = iv3_base_model2.predict(iv3_x_test2)
In [94]:
hide_code
# Save bottleneck features
x_train_bn2 = np.squeeze(x_train_bn2)
x_valid_bn2 = np.squeeze(x_valid_bn2)
x_test_bn2 = np.squeeze(x_test_bn2)

np.save('x_train_bn2.npy', x_train_bn2)
np.save('x_valid_bn2.npy', x_valid_bn2)
np.save('x_test_bn2.npy', x_test_bn2)
In [95]:
hide_code
# Load bottleneck features
x_train_bn2 = np.load('x_train_bn2.npy')
x_valid_bn2 = np.load('x_valid_bn2.npy')
x_test_bn2 = np.load('x_test_bn2.npy')
In [96]:
hide_code
def iv3_gray_model():
    model = Sequential()
    
    # TODO: Define a model architecture
    model.add(Conv2D(filters=32, kernel_size=2, input_shape=x_train_bn2.shape[1:]))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.25))
    
    model.add(Dense(2048))
    model.add(LeakyReLU(alpha=0.02))    
    model.add(Dropout(0.25))
        
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.02))
    model.add(Dropout(0.25))
    
    model.add(Dense(33, activation='softmax'))
    
    # TODO: Compile the model     
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model

iv3_gray_model = iv3_gray_model()
In [97]:
hide_code
# Create callbacks
iv3_gray_checkpointer = ModelCheckpoint(filepath='weights.best.iv3_gray.hdf5', 
                                        verbose=2, save_best_only=True)
iv3_gray_lr_reduction = ReduceLROnPlateau(monitor='val_loss', 
                                          patience=5, verbose=2, factor=0.8)
In [98]:
hide_code
# Fit the model 
iv3_gray_history = iv3_gray_model.fit(x_train_bn2, y_train2, 
                                      validation_data=(x_valid_bn2, y_valid2),
                                      epochs=50, batch_size=64, 
                                      callbacks=[iv3_gray_checkpointer, iv3_gray_lr_reduction], verbose=0);
Epoch 00001: val_loss improved from inf to 3.41114, saving model to weights.best.iv3_gray.hdf5

Epoch 00002: val_loss improved from 3.41114 to 3.36666, saving model to weights.best.iv3_gray.hdf5

Epoch 00003: val_loss improved from 3.36666 to 3.22700, saving model to weights.best.iv3_gray.hdf5

Epoch 00004: val_loss improved from 3.22700 to 3.05201, saving model to weights.best.iv3_gray.hdf5

Epoch 00005: val_loss improved from 3.05201 to 2.88743, saving model to weights.best.iv3_gray.hdf5

Epoch 00006: val_loss did not improve from 2.88743

Epoch 00007: val_loss improved from 2.88743 to 2.80730, saving model to weights.best.iv3_gray.hdf5

Epoch 00008: val_loss improved from 2.80730 to 2.60090, saving model to weights.best.iv3_gray.hdf5

Epoch 00009: val_loss did not improve from 2.60090

Epoch 00010: val_loss did not improve from 2.60090

Epoch 00011: val_loss did not improve from 2.60090

Epoch 00012: val_loss did not improve from 2.60090

Epoch 00013: val_loss did not improve from 2.60090

Epoch 00013: ReduceLROnPlateau reducing learning rate to 0.000800000037997961.

Epoch 00014: val_loss improved from 2.60090 to 2.47337, saving model to weights.best.iv3_gray.hdf5

Epoch 00015: val_loss did not improve from 2.47337

Epoch 00016: val_loss did not improve from 2.47337

Epoch 00017: val_loss did not improve from 2.47337

Epoch 00018: val_loss did not improve from 2.47337

Epoch 00019: val_loss did not improve from 2.47337

Epoch 00019: ReduceLROnPlateau reducing learning rate to 0.0006400000303983689.

Epoch 00020: val_loss did not improve from 2.47337

Epoch 00021: val_loss did not improve from 2.47337

Epoch 00022: val_loss did not improve from 2.47337

Epoch 00023: val_loss did not improve from 2.47337

Epoch 00024: val_loss improved from 2.47337 to 2.39465, saving model to weights.best.iv3_gray.hdf5

Epoch 00025: val_loss did not improve from 2.39465

Epoch 00026: val_loss did not improve from 2.39465

Epoch 00027: val_loss did not improve from 2.39465

Epoch 00028: val_loss did not improve from 2.39465

Epoch 00029: val_loss did not improve from 2.39465

Epoch 00029: ReduceLROnPlateau reducing learning rate to 0.0005120000336319208.

Epoch 00030: val_loss did not improve from 2.39465

Epoch 00031: val_loss did not improve from 2.39465

Epoch 00032: val_loss did not improve from 2.39465

Epoch 00033: val_loss did not improve from 2.39465

Epoch 00034: val_loss did not improve from 2.39465

Epoch 00034: ReduceLROnPlateau reducing learning rate to 0.00040960004553198815.

Epoch 00035: val_loss did not improve from 2.39465

Epoch 00036: val_loss did not improve from 2.39465

Epoch 00037: val_loss did not improve from 2.39465

Epoch 00038: val_loss did not improve from 2.39465

Epoch 00039: val_loss did not improve from 2.39465

Epoch 00039: ReduceLROnPlateau reducing learning rate to 0.00032768002711236477.

Epoch 00040: val_loss did not improve from 2.39465

Epoch 00041: val_loss did not improve from 2.39465

Epoch 00042: val_loss did not improve from 2.39465

Epoch 00043: val_loss did not improve from 2.39465

Epoch 00044: val_loss did not improve from 2.39465

Epoch 00044: ReduceLROnPlateau reducing learning rate to 0.0002621440216898918.

Epoch 00045: val_loss did not improve from 2.39465

Epoch 00046: val_loss did not improve from 2.39465

Epoch 00047: val_loss did not improve from 2.39465

Epoch 00048: val_loss did not improve from 2.39465

Epoch 00049: val_loss did not improve from 2.39465

Epoch 00049: ReduceLROnPlateau reducing learning rate to 0.00020971521735191345.

Epoch 00050: val_loss did not improve from 2.39465
In [99]:
hide_code
# Plot the training history
history_plot(iv3_gray_history, 0)
In [100]:
hide_code
# Load the model with the best validation accuracy
iv3_gray_model.load_weights('weights.best.iv3_gray.hdf5')
# Calculate classification accuracy on the testing set
iv3_gray_scores = iv3_gray_model.evaluate(x_test_bn2, y_test2)
print("Accuracy: %.2f%%" % (iv3_gray_scores[1]*100))
iv3_gray_scores
165/165 [==============================] - 1s 5ms/step
Accuracy: 40.00%
Out[100]:
[1.9825298302101366, 0.40000000072248054]

Step 7. Predictions

Display predictions for the models with the best accuracy.

Color Images

In [108]:
hide_code
# Create a list of symbols
symbols = ['а','б','в','г','д','е','ё','ж','з','и','й',
           'к','л','м','н','о','п','р','с','т','у','ф',
           'х','ц','ч','ш','щ','ъ','ы','ь','э','ю','я']
In [110]:
hide_code
# Model predictions for the testing dataset
y_test_predict = model.predict_classes(x_test)
In [111]:
hide_code
# Display true labels and predictions
fig = plt.figure(figsize=(18, 18))
for i, idx in enumerate(np.random.choice(x_test.shape[0], size=16, replace=False)):
    ax = fig.add_subplot(4, 4, i + 1, xticks=[], yticks=[])
    ax.imshow(np.squeeze(x_test[idx]))
    pred_idx = y_test_predict[idx]
    true_idx = np.argmax(y_test[idx])
    ax.set_title("{} ({})".format(symbols[pred_idx], symbols[true_idx]),
                 color=("#4876ff" if pred_idx == true_idx else "darkred"))

Grayscaled Images

In [112]:
hide_code
# Model predictions for the testing dataset
y_test2_predict = gray_model.predict_classes(x_test2)
In [113]:
hide_code
# Display true labels and predictions
fig = plt.figure(figsize=(18, 18))
for i, idx in enumerate(np.random.choice(x_test2.shape[0], size=16, replace=False)):
    ax = fig.add_subplot(4, 4, i + 1, xticks=[], yticks=[])
    ax.imshow(np.squeeze(x_test2[idx]), cmap=cm.bone)
    pred_idx = y_test2_predict[idx]
    true_idx = np.argmax(y_test2[idx])
    ax.set_title("{} ({})".format(symbols[pred_idx], symbols[true_idx]),
                 color=("#4876ff" if pred_idx == true_idx else "darkred"))