Deep Learning

Practice Projects

P5: Decor Colorization. Part 1

Step 0. Style and Libraries

Let's choose a style of the Jupyter notebook and import the software libraries. The command hide_code will hide the code cells.

In [9]:
%%html
<style>
@import url('https://fonts.googleapis.com/css?family=Orbitron|Roboto');
body {background-color: aliceblue;} 
a {color: #4876ff; font-family: 'Roboto';} 
h1 {color: #348ABD; font-family: 'Orbitron'; text-shadow: 4px 4px 4px #ccc;} 
h2, h3 {color: slategray; font-family: 'Roboto'; text-shadow: 4px 4px 4px #ccc;}
h4 {color: #348ABD; font-family: 'Orbitron';}
span {text-shadow: 4px 4px 4px #ccc;}
div.output_prompt, div.output_area pre {color: slategray;}
div.input_prompt, div.output_subarea {color: #4876ff;}      
div.output_stderr pre {background-color: aliceblue;}  
div.output_stderr {background-color: slategrey;}                        
</style>
<script>
code_show = true; 
function code_display() {
    if (code_show) {
        $('div.input').each(function(id) {
            if (id == 0 || $(this).html().indexOf('hide_code') > -1) {$(this).hide();}
        });
        $('div.output_prompt').css('opacity', 0);
    } else {
        $('div.input').each(function(id) {$(this).show();});
        $('div.output_prompt').css('opacity', 1);
    };
    code_show = !code_show;
} 
$(document).ready(code_display);
</script>
<form action="javascript: code_display()">
<input style="color: #348ABD; background: aliceblue; opacity: 0.8;" \ 
type="submit" value="Click to display or hide code cells">
</form>                  
In [39]:
hide_code = ''
import numpy as np 
import pandas as pd
import tensorflow as tf

from IPython.core.magic import (register_line_magic, register_cell_magic)
from PIL import ImageFile
from tqdm import tqdm
import h5py
import cv2

import matplotlib.pylab as plt
from matplotlib import cm
%matplotlib inline

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier

from keras.utils import to_categorical
from keras.preprocessing import image as keras_image
from keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau 
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import array_to_img, img_to_array, load_img

from keras import backend
from keras import losses
from keras.engine.topology import Layer
from keras.optimizers import Adam, Nadam
from keras.engine import InputLayer
from keras.models import Sequential, load_model, Model

from keras.layers import Input, BatchNormalization, Flatten, Dropout
from keras.layers import Dense, LSTM, Activation, LeakyReLU
from keras.layers import Conv2D, MaxPool2D, MaxPooling2D, GlobalMaxPooling2D
from keras.layers import UpSampling2D, Conv2DTranspose
from keras.layers.core import RepeatVector, Permute
from keras.layers import Reshape, concatenate, merge

# from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input

from skimage.color import rgb2lab, lab2rgb, rgb2gray, gray2rgb
from skimage import color, measure
from skimage.transform import resize as skimage_resize
from skimage.io import imsave
In [11]:
hide_code
from keras import __version__
print('keras version:', __version__)
print('tensorflow version:', tf.__version__)
keras version: 2.1.6
tensorflow version: 1.8.0
In [12]:
hide_code
# Plot the neural network fitting history
def history_plot(fit_history):
    plt.figure(figsize=(18, 12))
    
    plt.subplot(211)
    plt.plot(fit_history.history['loss'], color='slategray', label = 'train')
    plt.plot(fit_history.history['val_loss'], color='#4876ff', label = 'valid')
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.legend()
    plt.title('Loss Function');  
    
    plt.subplot(212)
    plt.plot(fit_history.history['acc'], color='slategray', label = 'train')
    plt.plot(fit_history.history['val_acc'], color='#4876ff', label = 'valid')
    plt.xlabel("Epochs")
    plt.ylabel("Accuracy")    
    plt.legend()
    plt.title('Accuracy');

Step 1. Load and Explore the Data

For this project, I have created the dataset of color images (150x150x3) with traditional patterns. Run the following cells to download the data.

In [13]:
hide_code
# Function for processing an image
def image_to_tensor(img_path, folder_path):
    img = keras_image.load_img(folder_path + img_path, target_size=(150, 150))
    x = keras_image.img_to_array(img)
    return np.expand_dims(x, axis=0)
# Function for creating the data tensor
def data_to_tensor(img_paths, folder_path):
    list_of_tensors = [image_to_tensor(img_path, folder_path) for img_path in tqdm(img_paths)]
    return np.vstack(list_of_tensors)
ImageFile.LOAD_TRUNCATED_IMAGES = True 
In [15]:
hide_code
# Load the dataset 
data = pd.read_csv("decor.txt")
files = data['file']

countries = data['country_label'].values
decors = data['decor_label'].values
types = data['type_label'].values

images = data_to_tensor(files, "data/");
100%|██████████| 485/485 [01:10<00:00,  6.87it/s]
In [16]:
hide_code
# Print the shape 
print ('Image shape:', images.shape)
print ('Country shape:', countries.shape)
print ('Decor shape:', decors.shape)
print ('Type shape:', types.shape)
Image shape: (485, 150, 150, 3)
Country shape: (485,)
Decor shape: (485,)
Type shape: (485,)
In [17]:
hide_code
# Read from files and display images using OpenCV
def display_images(img_path, ax):
    img = cv2.imread("data/" + img_path)
    ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    
fig = plt.figure(figsize=(18, 6))
for i in range(10):
    ax = fig.add_subplot(2, 5, i + 1, xticks=[], yticks=[], 
                         title=data['country'][i*48]+'; '+data['decor'][i*48]+'; '+data['type'][i*48])
    display_images(files[i*48], ax)

Step 2. Save and Load the Data

The data tensors can be saved in the appropriate format of files .h5.

In [10]:
hide_code
# Create the tensor file
with h5py.File('DecorColorImages.h5', 'w') as f:
    f.create_dataset('images', data = images)
    f.create_dataset('countries', data = countries)
    f.create_dataset('decors', data = decors)
    f.create_dataset('types', data = types)
    f.close()
In [11]:
hide_code
# Read the h5 file
f = h5py.File('DecorColorImages.h5', 'r')

# List all groups
keys = list(f.keys())
keys
Out[11]:
['countries', 'decors', 'images', 'types']
In [12]:
hide_code
# Create tensors and targets
countries = np.array(f[keys[0]])
decors = np.array(f[keys[1]])
images = np.array(f[keys[2]])
types = np.array(f[keys[3]])

print ('Image shape:', images.shape)
print ('Country shape:', countries.shape)
print ('Decor shape:', decors.shape)
print ('Type shape:', types.shape)
Image shape: (485, 150, 150, 3)
Country shape: (485,)
Decor shape: (485,)
Type shape: (485,)
In [13]:
hide_code
# Create a csv file
images_csv = images.reshape(485,150*150*3)
np.savetxt("decor_images.csv", images_csv, fmt='%i', delimiter=",")
In [14]:
hide_code
# Read the pandas dataframe from csv
data_images = pd.read_csv("decor_images.csv", header=None)
data_images.iloc[:10,:10]
Out[14]:
0 1 2 3 4 5 6 7 8 9
0 253 253 255 253 253 255 253 253 255 253
1 255 255 255 255 255 255 255 255 255 255
2 255 255 255 255 255 255 255 255 255 255
3 255 255 255 255 255 255 255 255 255 255
4 255 255 255 255 255 255 255 255 255 255
5 254 254 255 255 255 255 255 255 255 255
6 255 255 255 255 255 255 255 255 255 255
7 255 255 255 255 255 255 255 255 255 255
8 255 255 255 255 255 255 255 255 255 255
9 255 255 255 255 255 255 255 255 255 255
In [15]:
hide_code
# Read image tensors from the dataframe
images = data_images.as_matrix()
images = images.reshape(-1,150,150,3)

Step 3. Implement Preprocess Functions

In [18]:
hide_code
# Normalize the tensors
images = images.astype('float32')/255
In [19]:
hide_code
# Read and display a tensor using Matplotlib
pattern_number = 106
print('Country: ', countries[pattern_number], '-', data['country'][pattern_number])
print('Decor: ', decors[pattern_number], '-', data['decor'][pattern_number])
print('Type: ', types[pattern_number], '-', data['type'][pattern_number])
plt.figure(figsize=(5,5))
plt.imshow(images[pattern_number]);
Country:  1 - Russia
Decor:  2 - Khokhloma
Type:  1 - pattern
In [20]:
hide_code
# Grayscaled tensors
gray_images = np.dot(images[...,:3], [0.299, 0.587, 0.114])
print ("Shape of grayscaled images:", gray_images.shape)
Shape of grayscaled images: (485, 150, 150)
In [21]:
hide_code
# Read and display a grayscaled tensor using Matplotlib
print('Country: ', countries[pattern_number], '-', data['country'][pattern_number])
print('Decor: ', decors[pattern_number], '-', data['decor'][pattern_number])
print('Type: ', types[pattern_number], '-', data['type'][pattern_number])
plt.figure(figsize=(5,5))
plt.imshow(gray_images[pattern_number], cmap=cm.bone);
Country:  1 - Russia
Decor:  2 - Khokhloma
Type:  1 - pattern
In [13]:
hide_code
# Vectorize an image example / Just for fun
@register_line_magic
def vector(number):
    example = images[int(number)]
    gray_example = color.colorconv.rgb2grey(example)
    contours = measure.find_contours(gray_example, 0.85)
    plt.figure(figsize=(8,8))
    plt.gca().invert_yaxis()
    for n, contour in enumerate(contours):
        plt.plot(contour[:, 1], contour[:, 0], lw=1)
In [14]:
hide_code
# Display a vector image using the magic command
%vector 106
In [22]:
hide_code
# Print the target unique values
print('Countries: ', set(countries))
print('Decors: ', set(decors))
print('Types: ', set(types))
Countries:  {1, 2, 3, 4}
Decors:  {1, 2, 3, 4, 5, 6, 7}
Types:  {1, 2}
In [23]:
# One-hot encode the targets, started from the zero label
cat_countries = to_categorical(np.array(countries-1), 4)
cat_decors = to_categorical(np.array(decors-1), 7)
cat_types = to_categorical(np.array(types-1), 2)
cat_countries.shape, cat_decors.shape, cat_types.shape
Out[23]:
((485, 4), (485, 7), (485, 2))
In [24]:
# Create multi-label targets
targets = np.concatenate((cat_countries, cat_decors), axis=1)
targets = np.concatenate((targets, cat_types), axis=1)
targets.shape
Out[24]:
(485, 13)
In [25]:
hide_code
# Split the data / Color images / Country
x_train, x_test, y_train, y_test = train_test_split(images, cat_countries, 
                                                    test_size = 0.2, 
                                                    random_state = 1)
n = int(len(x_test)/2)
x_valid, y_valid = x_test[:n], y_test[:n]
x_test, y_test = x_test[n:], y_test[n:]
In [26]:
hide_code
# Split the data / Color images / Decor
x_train3, x_test3, y_train3, y_test3 = train_test_split(images, cat_decors, 
                                                        test_size = 0.2, 
                                                        random_state = 1)
n = int(len(x_test3)/2)
x_valid3, y_valid3 = x_test3[:n], y_test3[:n]
x_test3, y_test3 = x_test3[n:], y_test3[n:]
In [27]:
hide_code
# Split the data / Color images / Multi-Label
x_train5, x_test5, y_train5, y_test5 = train_test_split(images, targets, 
                                                        test_size = 0.2, 
                                                        random_state = 1)
n = int(len(x_test5)/2)
x_valid5, y_valid5 = x_test5[:n], y_test5[:n]
x_test5, y_test5 = x_test5[n:], y_test5[n:]
In [28]:
hide_code
# Create a list of targets
y_train5_list = [y_train5[:, :4], y_train5[:, 4:11], y_train5[:, 11:]]
y_test5_list = [y_test5[:, :4], y_test5[:, 4:11], y_test5[:, 11:]]
y_valid5_list = [y_valid5[:, :4], y_valid5[:, 4:11], y_valid5[:, 11:]]
In [29]:
hide_code
# Split the data / Grayscaled images / Country
x_train2, x_test2, y_train2, y_test2 = train_test_split(gray_images, cat_countries, 
                                                        test_size = 0.2, 
                                                        random_state = 1)
n = int(len(x_test2)/2)
x_valid2, y_valid2 = x_test2[:n], y_test2[:n]
x_test2, y_test2 = x_test2[n:], y_test2[n:]
In [30]:
hide_code
# Reshape the grayscaled data
x_train2, x_test2, x_valid2 = \
x_train2.reshape(-1, 150, 150, 1), \
x_test2.reshape(-1, 150, 150, 1), \
x_valid2.reshape(-1, 150, 150, 1)
In [31]:
hide_code
# Split the data / Grayscaled images / Decor
x_train4, x_test4, y_train4, y_test4 = train_test_split(gray_images, cat_decors, 
                                                        test_size = 0.2, 
                                                        random_state = 1)
n = int(len(x_test4)/2)
x_valid4, y_valid4 = x_test4[:n], y_test4[:n]
x_test4, y_test4 = x_test4[n:], y_test4[n:]
In [32]:
hide_code
# Reshape the grayscaled data
x_train4, x_test4, x_valid4 = \
x_train4.reshape(-1, 150, 150, 1), \
x_test4.reshape(-1, 150, 150, 1), \
x_valid4.reshape(-1, 150, 150, 1)
In [33]:
hide_code
# Split the data / Grayscaled images / Multi-Label
x_train6, x_test6, y_train6, y_test6 = train_test_split(gray_images, targets, 
                                                        test_size = 0.2, 
                                                        random_state = 1)
n = int(len(x_test6)/2)
x_valid6, y_valid6 = x_test6[:n], y_test6[:n]
x_test6, y_test6 = x_test6[n:], y_test6[n:]
In [34]:
hide_code
# Reshape the grayscaled data
x_train6, x_test6, x_valid6 = \
x_train6.reshape(-1, 150, 150, 1), \
x_test6.reshape(-1, 150, 150, 1), \
x_valid6.reshape(-1, 150, 150, 1)
In [35]:
hide_code
# Create a list of targets
y_train6_list = [y_train6[:, :4], y_train6[:, 4:11], y_train6[:, 11:]]
y_test6_list = [y_test6[:, :4], y_test6[:, 4:11], y_test6[:, 11:]]
y_valid6_list = [y_valid6[:, :4], y_valid6[:, 4:11], y_valid6[:, 11:]]

Step 4. Create Neural Networks for One-Label Classification

Country & Decor Recognition for Color Images

In [40]:
hide_code
def cc_model(leaky_alpha):
    model = Sequential()
    # TODO: Define a model architecture
    model.add(Conv2D(32, (5, 5), padding='same', input_shape=x_train.shape[1:]))
    model.add(LeakyReLU(alpha=leaky_alpha))    
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(96, (5, 5)))
    model.add(LeakyReLU(alpha=leaky_alpha))    
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    
    model.add(GlobalMaxPooling2D()) 
    
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=leaky_alpha))
    model.add(Dropout(0.5)) 
    
    model.add(Dense(4))
    model.add(Activation('softmax'))
    
    # TODO: Compile the model
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    
    return model
In [41]:
hide_code
# Train the model
cc_model = cc_model(0.02)
cc_checkpointer = ModelCheckpoint(filepath='weights.best.decor.cc_model.hdf5', 
                                  verbose=2, save_best_only=True)
cc_lr_reduction = ReduceLROnPlateau(monitor='val_loss', 
                                    patience=5, verbose=2, factor=0.5)
cc_history = cc_model.fit(x_train, y_train, 
                          epochs=30, batch_size=16, verbose=2,
                          validation_data=(x_valid, y_valid),
                          callbacks=[cc_checkpointer, cc_lr_reduction])
Train on 388 samples, validate on 48 samples
Epoch 1/30
 - 128s - loss: 1.2090 - acc: 0.5541 - val_loss: 1.3014 - val_acc: 0.5417

Epoch 00001: val_loss improved from inf to 1.30145, saving model to weights.best.decor.cc_model.hdf5
Epoch 2/30
 - 65s - loss: 1.1290 - acc: 0.5438 - val_loss: 1.1966 - val_acc: 0.5417

Epoch 00002: val_loss improved from 1.30145 to 1.19661, saving model to weights.best.decor.cc_model.hdf5
Epoch 3/30
 - 47s - loss: 1.0857 - acc: 0.5593 - val_loss: 1.4703 - val_acc: 0.5625

Epoch 00003: val_loss did not improve from 1.19661
Epoch 4/30
 - 52s - loss: 1.0013 - acc: 0.5799 - val_loss: 1.0703 - val_acc: 0.5417

Epoch 00004: val_loss improved from 1.19661 to 1.07034, saving model to weights.best.decor.cc_model.hdf5
Epoch 5/30
 - 55s - loss: 0.8332 - acc: 0.6521 - val_loss: 1.2771 - val_acc: 0.5417

Epoch 00005: val_loss did not improve from 1.07034
Epoch 6/30
 - 72s - loss: 0.7676 - acc: 0.6804 - val_loss: 1.0143 - val_acc: 0.5833

Epoch 00006: val_loss improved from 1.07034 to 1.01433, saving model to weights.best.decor.cc_model.hdf5
Epoch 7/30
 - 46s - loss: 0.8422 - acc: 0.6675 - val_loss: 0.7492 - val_acc: 0.7917

Epoch 00007: val_loss improved from 1.01433 to 0.74918, saving model to weights.best.decor.cc_model.hdf5
Epoch 8/30
 - 45s - loss: 0.5624 - acc: 0.7758 - val_loss: 0.5889 - val_acc: 0.7708

Epoch 00008: val_loss improved from 0.74918 to 0.58892, saving model to weights.best.decor.cc_model.hdf5
Epoch 9/30
 - 48s - loss: 0.5766 - acc: 0.7526 - val_loss: 0.5846 - val_acc: 0.8125

Epoch 00009: val_loss improved from 0.58892 to 0.58459, saving model to weights.best.decor.cc_model.hdf5
Epoch 10/30
 - 55s - loss: 0.5218 - acc: 0.7964 - val_loss: 0.5737 - val_acc: 0.8125

Epoch 00010: val_loss improved from 0.58459 to 0.57368, saving model to weights.best.decor.cc_model.hdf5
Epoch 11/30
 - 50s - loss: 0.4285 - acc: 0.8351 - val_loss: 0.9127 - val_acc: 0.5833

Epoch 00011: val_loss did not improve from 0.57368
Epoch 12/30
 - 53s - loss: 0.4640 - acc: 0.8273 - val_loss: 0.5164 - val_acc: 0.8750

Epoch 00012: val_loss improved from 0.57368 to 0.51635, saving model to weights.best.decor.cc_model.hdf5
Epoch 13/30
 - 56s - loss: 0.4454 - acc: 0.8222 - val_loss: 0.5025 - val_acc: 0.8542

Epoch 00013: val_loss improved from 0.51635 to 0.50250, saving model to weights.best.decor.cc_model.hdf5
Epoch 14/30
 - 48s - loss: 0.3263 - acc: 0.8789 - val_loss: 1.6849 - val_acc: 0.5833

Epoch 00014: val_loss did not improve from 0.50250
Epoch 15/30
 - 48s - loss: 0.4854 - acc: 0.8325 - val_loss: 0.4445 - val_acc: 0.8542

Epoch 00015: val_loss improved from 0.50250 to 0.44448, saving model to weights.best.decor.cc_model.hdf5
Epoch 16/30
 - 43s - loss: 0.3468 - acc: 0.8686 - val_loss: 0.5565 - val_acc: 0.7708

Epoch 00016: val_loss did not improve from 0.44448
Epoch 17/30
 - 46s - loss: 0.2964 - acc: 0.8892 - val_loss: 0.4432 - val_acc: 0.8125

Epoch 00017: val_loss improved from 0.44448 to 0.44319, saving model to weights.best.decor.cc_model.hdf5
Epoch 18/30
 - 46s - loss: 0.3970 - acc: 0.8505 - val_loss: 0.4886 - val_acc: 0.8125

Epoch 00018: val_loss did not improve from 0.44319
Epoch 19/30
 - 47s - loss: 0.2969 - acc: 0.8814 - val_loss: 0.4679 - val_acc: 0.8750

Epoch 00019: val_loss did not improve from 0.44319
Epoch 20/30
 - 48s - loss: 0.2623 - acc: 0.9149 - val_loss: 0.5234 - val_acc: 0.7292

Epoch 00020: val_loss did not improve from 0.44319
Epoch 21/30
 - 47s - loss: 0.2236 - acc: 0.9175 - val_loss: 0.3732 - val_acc: 0.9167

Epoch 00021: val_loss improved from 0.44319 to 0.37321, saving model to weights.best.decor.cc_model.hdf5
Epoch 22/30
 - 45s - loss: 0.2685 - acc: 0.8866 - val_loss: 3.1307 - val_acc: 0.1875

Epoch 00022: val_loss did not improve from 0.37321
Epoch 23/30
 - 45s - loss: 0.4536 - acc: 0.8402 - val_loss: 0.3651 - val_acc: 0.8750

Epoch 00023: val_loss improved from 0.37321 to 0.36506, saving model to weights.best.decor.cc_model.hdf5
Epoch 24/30
 - 45s - loss: 0.2911 - acc: 0.8840 - val_loss: 0.4018 - val_acc: 0.8750

Epoch 00024: val_loss did not improve from 0.36506
Epoch 25/30
 - 45s - loss: 0.2343 - acc: 0.9072 - val_loss: 0.3658 - val_acc: 0.8958

Epoch 00025: val_loss did not improve from 0.36506
Epoch 26/30
 - 47s - loss: 0.1912 - acc: 0.9253 - val_loss: 0.4452 - val_acc: 0.8125

Epoch 00026: val_loss did not improve from 0.36506
Epoch 27/30
 - 47s - loss: 0.1629 - acc: 0.9459 - val_loss: 0.8442 - val_acc: 0.7292

Epoch 00027: val_loss did not improve from 0.36506
Epoch 28/30
 - 47s - loss: 0.2241 - acc: 0.9149 - val_loss: 0.9585 - val_acc: 0.7292

Epoch 00028: val_loss did not improve from 0.36506

Epoch 00028: ReduceLROnPlateau reducing learning rate to 0.0010000000474974513.
Epoch 29/30
 - 45s - loss: 0.2285 - acc: 0.9304 - val_loss: 0.2946 - val_acc: 0.9167

Epoch 00029: val_loss improved from 0.36506 to 0.29459, saving model to weights.best.decor.cc_model.hdf5
Epoch 30/30
 - 43s - loss: 0.1219 - acc: 0.9562 - val_loss: 0.3265 - val_acc: 0.9167

Epoch 00030: val_loss did not improve from 0.29459
In [42]:
hide_code
# Plot the training history
history_plot(cc_history)
In [43]:
hide_code
# Load the model with the best validation accuracy
cc_model.load_weights('weights.best.decor.cc_model.hdf5')
# Calculate classification accuracy on the testing set
cc_score = cc_model.evaluate(x_test, y_test)
cc_score
49/49 [==============================] - 9s 189ms/step
Out[43]:
[0.4556164607709768, 0.8571428620085424]
In [45]:
hide_code
def cd_model(leaky_alpha):
    model = Sequential()
    # TODO: Define a model architecture
    model.add(Conv2D(32, (5, 5), padding='same', input_shape=x_train.shape[1:]))
    model.add(LeakyReLU(alpha=leaky_alpha))    
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(96, (5, 5)))
    model.add(LeakyReLU(alpha=leaky_alpha))    
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    
    model.add(GlobalMaxPooling2D()) 
    
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=leaky_alpha))
    model.add(Dropout(0.25)) 
    
    model.add(Dense(7))
    model.add(Activation('softmax'))
    
    # TODO: Compile the model
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    
    return model
In [46]:
hide_code
# Train the model
cd_model = cd_model(0.02)
cd_checkpointer = ModelCheckpoint(filepath='weights.best.decor.cd_model.hdf5', 
                                  verbose=2, save_best_only=True)
cd_lr_reduction = ReduceLROnPlateau(monitor='val_loss', 
                                    patience=5, verbose=2, factor=0.5)
cd_history = cd_model.fit(x_train3, y_train3, 
                          epochs=30, batch_size=16, verbose=2,
                          validation_data=(x_valid3, y_valid3),
                          callbacks=[cd_checkpointer, cd_lr_reduction])
Train on 388 samples, validate on 48 samples
Epoch 1/30
 - 113s - loss: 1.8319 - acc: 0.3067 - val_loss: 1.6800 - val_acc: 0.4583

Epoch 00001: val_loss improved from inf to 1.68001, saving model to weights.best.decor.cd_model.hdf5
Epoch 2/30
 - 43s - loss: 1.3843 - acc: 0.4536 - val_loss: 1.4305 - val_acc: 0.3125

Epoch 00002: val_loss improved from 1.68001 to 1.43052, saving model to weights.best.decor.cd_model.hdf5
Epoch 3/30
 - 43s - loss: 1.1747 - acc: 0.5232 - val_loss: 1.4565 - val_acc: 0.4167

Epoch 00003: val_loss did not improve from 1.43052
Epoch 4/30
 - 43s - loss: 1.1396 - acc: 0.5412 - val_loss: 1.3524 - val_acc: 0.3958

Epoch 00004: val_loss improved from 1.43052 to 1.35238, saving model to weights.best.decor.cd_model.hdf5
Epoch 5/30
 - 43s - loss: 0.9716 - acc: 0.6057 - val_loss: 1.1733 - val_acc: 0.4375

Epoch 00005: val_loss improved from 1.35238 to 1.17335, saving model to weights.best.decor.cd_model.hdf5
Epoch 6/30
 - 43s - loss: 0.8894 - acc: 0.6443 - val_loss: 1.0366 - val_acc: 0.5208

Epoch 00006: val_loss improved from 1.17335 to 1.03664, saving model to weights.best.decor.cd_model.hdf5
Epoch 7/30
 - 42s - loss: 0.6648 - acc: 0.7371 - val_loss: 1.3791 - val_acc: 0.4167

Epoch 00007: val_loss did not improve from 1.03664
Epoch 8/30
 - 42s - loss: 0.7677 - acc: 0.7139 - val_loss: 1.0497 - val_acc: 0.5208

Epoch 00008: val_loss did not improve from 1.03664
Epoch 9/30
 - 42s - loss: 0.5436 - acc: 0.8015 - val_loss: 0.7122 - val_acc: 0.8125

Epoch 00009: val_loss improved from 1.03664 to 0.71224, saving model to weights.best.decor.cd_model.hdf5
Epoch 10/30
 - 42s - loss: 0.4837 - acc: 0.8041 - val_loss: 1.0507 - val_acc: 0.5625

Epoch 00010: val_loss did not improve from 0.71224
Epoch 11/30
 - 42s - loss: 0.4736 - acc: 0.8247 - val_loss: 0.7995 - val_acc: 0.6875

Epoch 00011: val_loss did not improve from 0.71224
Epoch 12/30
 - 43s - loss: 0.4908 - acc: 0.8222 - val_loss: 0.9376 - val_acc: 0.5833

Epoch 00012: val_loss did not improve from 0.71224
Epoch 13/30
 - 43s - loss: 0.4126 - acc: 0.8454 - val_loss: 0.6991 - val_acc: 0.8333

Epoch 00013: val_loss improved from 0.71224 to 0.69910, saving model to weights.best.decor.cd_model.hdf5
Epoch 14/30
 - 42s - loss: 0.4364 - acc: 0.8351 - val_loss: 0.6067 - val_acc: 0.8750

Epoch 00014: val_loss improved from 0.69910 to 0.60668, saving model to weights.best.decor.cd_model.hdf5
Epoch 15/30
 - 44s - loss: 0.3283 - acc: 0.8608 - val_loss: 0.8787 - val_acc: 0.6875

Epoch 00015: val_loss did not improve from 0.60668
Epoch 16/30
 - 42s - loss: 0.4650 - acc: 0.8325 - val_loss: 0.7367 - val_acc: 0.7500

Epoch 00016: val_loss did not improve from 0.60668
Epoch 17/30
 - 42s - loss: 0.3311 - acc: 0.8763 - val_loss: 0.6434 - val_acc: 0.7917

Epoch 00017: val_loss did not improve from 0.60668
Epoch 18/30
 - 42s - loss: 0.2821 - acc: 0.8943 - val_loss: 0.7023 - val_acc: 0.7292

Epoch 00018: val_loss did not improve from 0.60668
Epoch 19/30
 - 42s - loss: 0.2022 - acc: 0.9381 - val_loss: 0.7713 - val_acc: 0.7708

Epoch 00019: val_loss did not improve from 0.60668

Epoch 00019: ReduceLROnPlateau reducing learning rate to 0.0010000000474974513.
Epoch 20/30
 - 43s - loss: 0.1783 - acc: 0.9356 - val_loss: 0.5512 - val_acc: 0.8542

Epoch 00020: val_loss improved from 0.60668 to 0.55118, saving model to weights.best.decor.cd_model.hdf5
Epoch 21/30
 - 41s - loss: 0.1457 - acc: 0.9485 - val_loss: 0.6678 - val_acc: 0.8542

Epoch 00021: val_loss did not improve from 0.55118
Epoch 22/30
 - 40s - loss: 0.0924 - acc: 0.9742 - val_loss: 0.5149 - val_acc: 0.8750

Epoch 00022: val_loss improved from 0.55118 to 0.51492, saving model to weights.best.decor.cd_model.hdf5
Epoch 23/30
 - 40s - loss: 0.0995 - acc: 0.9742 - val_loss: 0.6233 - val_acc: 0.8542

Epoch 00023: val_loss did not improve from 0.51492
Epoch 24/30
 - 41s - loss: 0.0887 - acc: 0.9639 - val_loss: 0.6824 - val_acc: 0.8125

Epoch 00024: val_loss did not improve from 0.51492
Epoch 25/30
 - 40s - loss: 0.0756 - acc: 0.9716 - val_loss: 0.5981 - val_acc: 0.8958

Epoch 00025: val_loss did not improve from 0.51492
Epoch 26/30
 - 40s - loss: 0.0702 - acc: 0.9691 - val_loss: 0.6440 - val_acc: 0.8542

Epoch 00026: val_loss did not improve from 0.51492
Epoch 27/30
 - 40s - loss: 0.0595 - acc: 0.9845 - val_loss: 0.6434 - val_acc: 0.8542

Epoch 00027: val_loss did not improve from 0.51492

Epoch 00027: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.
Epoch 28/30
 - 42s - loss: 0.0460 - acc: 0.9897 - val_loss: 0.6220 - val_acc: 0.8542

Epoch 00028: val_loss did not improve from 0.51492
Epoch 29/30
 - 40s - loss: 0.0462 - acc: 0.9820 - val_loss: 0.5872 - val_acc: 0.8958

Epoch 00029: val_loss did not improve from 0.51492
Epoch 30/30
 - 39s - loss: 0.0322 - acc: 1.0000 - val_loss: 0.5772 - val_acc: 0.8750

Epoch 00030: val_loss did not improve from 0.51492
In [47]:
hide_code
# Plot the training history
history_plot(cd_history)
In [48]:
hide_code
# Load the model with the best validation accuracy
cd_model.load_weights('weights.best.decor.cd_model.hdf5')
# Calculate classification accuracy on the testing set
cd_score = cd_model.evaluate(x_test3, y_test3)
cd_score
49/49 [==============================] - 5s 101ms/step
Out[48]:
[0.4226962364449793, 0.8979591873227334]
In [59]:
hide_code
# Save previous experiment results for test points/ 30 epochs
leaky_alphas = np.array([0.005, 0.01, 0.015, 0.02, 0.025])
cd_losses_30 = np.array([0.47320266524139715, 0.63012125540752795, 0.45978901459246263, 
                         0.50793846650999419, 0.50199888798655301])
cd_accuracies_30 = np.array([0.87755102405742724, 0.81632653547793022, 0.83673469995965766, 
                             0.89795918610631198, 0.87755102405742724])
In [56]:
hide_code
# Save previous experiment results for test points/ 20 epochs
cd_losses_20 = np.array([0.65711019720349995, 0.66739930911939971, 0.59441830917280547, 
                         0.66775868863475563, 0.68195151066293525])
cd_accuracies_20 = np.array([0.83673469874323636, 0.79591837342904537, 0.81632653669435151, 
                             0.79591837342904537, 0.81632653547793022])
In [60]:
hide_code
# Plot the results
plt.figure(figsize=(18, 6))
plt.plot(leaky_alphas, cd_losses_20, '-o', color='slategray', label = 'loss')
plt.plot(leaky_alphas, cd_accuracies_20, '-o', color='#4876ff', label = 'accuracy')
plt.xlabel('leaky alphas')
plt.ylabel('results')
plt.legend(loc=3)
plt.title('Decor Recognition. Test Results for Color Images: Loss Function and Accuracy');
In [60]:
hide_code
# Plot the previous experiment results
plt.figure(figsize=(18, 6))
plt.plot(leaky_alphas, cd_losses_30, '-o', color='slategray', label = 'loss')
plt.plot(leaky_alphas, cd_accuracies_30, '-o', color='#4876ff', label = 'accuracy')
plt.xlabel('leaky alphas')
plt.ylabel('results')
plt.legend(loc=3)
plt.title('Decor Recognition. Test Results for Color Images (30 Epochs): Loss Function and Accuracy');

Country & Decor Recognition for Grayscaled Images

In [57]:
hide_code
def gray_cc_model(leaky_alpha):
    model = Sequential()
    
    # TODO: Define a model architecture
    model.add(Conv2D(32, (5, 5), padding='same', input_shape=x_train4.shape[1:]))
    model.add(LeakyReLU(alpha=leaky_alpha))    
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(96, (5, 5)))
    model.add(LeakyReLU(alpha=leaky_alpha))    
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    
    model.add(GlobalMaxPooling2D()) 
    
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=leaky_alpha))
    model.add(Dropout(0.5))   

    model.add(Dense(4))
    model.add(Activation('softmax'))
    
    # TODO: Compile the model
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model
In [58]:
hide_code
# Train the model
gray_cc_model = gray_cc_model(0.02)
gray_cc_checkpointer = ModelCheckpoint(filepath='weights.best.decor.gray_cc_model.hdf5', 
                                       verbose=2, save_best_only=True)
gray_cc_lr_reduction = ReduceLROnPlateau(monitor='val_loss', 
                                         patience=5, verbose=2, factor=0.8)
gray_cc_history = gray_cc_model.fit(x_train2, y_train2, 
                                    epochs=30, batch_size=16, verbose=2,                                    
                                    validation_data=(x_valid2, y_valid2),
                                    callbacks=[gray_cc_checkpointer, gray_cc_lr_reduction])
Train on 388 samples, validate on 48 samples
Epoch 1/30
 - 56s - loss: 1.1769 - acc: 0.5567 - val_loss: 1.2329 - val_acc: 0.5417

Epoch 00001: val_loss improved from inf to 1.23288, saving model to weights.best.decor.gray_cc_model.hdf5
Epoch 2/30
 - 43s - loss: 1.1107 - acc: 0.5876 - val_loss: 1.1930 - val_acc: 0.5417

Epoch 00002: val_loss improved from 1.23288 to 1.19302, saving model to weights.best.decor.gray_cc_model.hdf5
Epoch 3/30
 - 51s - loss: 1.1291 - acc: 0.5876 - val_loss: 1.1511 - val_acc: 0.5417

Epoch 00003: val_loss improved from 1.19302 to 1.15112, saving model to weights.best.decor.gray_cc_model.hdf5
Epoch 4/30
 - 35s - loss: 1.0939 - acc: 0.5902 - val_loss: 1.2355 - val_acc: 0.5417

Epoch 00004: val_loss did not improve from 1.15112
Epoch 5/30
 - 35s - loss: 1.1052 - acc: 0.5876 - val_loss: 1.2021 - val_acc: 0.5417

Epoch 00005: val_loss did not improve from 1.15112
Epoch 6/30
 - 35s - loss: 1.1074 - acc: 0.5876 - val_loss: 1.2596 - val_acc: 0.5417

Epoch 00006: val_loss did not improve from 1.15112
Epoch 7/30
 - 35s - loss: 1.1054 - acc: 0.5851 - val_loss: 1.1417 - val_acc: 0.5417

Epoch 00007: val_loss improved from 1.15112 to 1.14168, saving model to weights.best.decor.gray_cc_model.hdf5
Epoch 8/30
 - 34s - loss: 1.0723 - acc: 0.5851 - val_loss: 1.1197 - val_acc: 0.5417

Epoch 00008: val_loss improved from 1.14168 to 1.11966, saving model to weights.best.decor.gray_cc_model.hdf5
Epoch 9/30
 - 35s - loss: 1.0738 - acc: 0.5954 - val_loss: 1.1652 - val_acc: 0.5417

Epoch 00009: val_loss did not improve from 1.11966
Epoch 10/30
 - 35s - loss: 1.0834 - acc: 0.5773 - val_loss: 1.1793 - val_acc: 0.5417

Epoch 00010: val_loss did not improve from 1.11966
Epoch 11/30
 - 35s - loss: 1.0652 - acc: 0.5902 - val_loss: 1.1318 - val_acc: 0.5417

Epoch 00011: val_loss did not improve from 1.11966
Epoch 12/30
 - 35s - loss: 1.0670 - acc: 0.5696 - val_loss: 1.2635 - val_acc: 0.5417

Epoch 00012: val_loss did not improve from 1.11966
Epoch 13/30
 - 37s - loss: 1.0310 - acc: 0.5851 - val_loss: 1.3684 - val_acc: 0.3750

Epoch 00013: val_loss did not improve from 1.11966

Epoch 00013: ReduceLROnPlateau reducing learning rate to 0.000800000037997961.
Epoch 14/30
 - 36s - loss: 1.0061 - acc: 0.5954 - val_loss: 1.3058 - val_acc: 0.3958

Epoch 00014: val_loss did not improve from 1.11966
Epoch 15/30
 - 37s - loss: 1.0213 - acc: 0.6005 - val_loss: 1.2925 - val_acc: 0.4583

Epoch 00015: val_loss did not improve from 1.11966
Epoch 16/30
 - 36s - loss: 0.9925 - acc: 0.6031 - val_loss: 1.0663 - val_acc: 0.5417

Epoch 00016: val_loss improved from 1.11966 to 1.06632, saving model to weights.best.decor.gray_cc_model.hdf5
Epoch 17/30
 - 45s - loss: 0.9922 - acc: 0.6005 - val_loss: 1.0628 - val_acc: 0.5417

Epoch 00017: val_loss improved from 1.06632 to 1.06276, saving model to weights.best.decor.gray_cc_model.hdf5
Epoch 18/30
 - 36s - loss: 0.9603 - acc: 0.6263 - val_loss: 1.0783 - val_acc: 0.5417

Epoch 00018: val_loss did not improve from 1.06276
Epoch 19/30
 - 35s - loss: 0.9682 - acc: 0.6031 - val_loss: 1.0438 - val_acc: 0.5625

Epoch 00019: val_loss improved from 1.06276 to 1.04384, saving model to weights.best.decor.gray_cc_model.hdf5
Epoch 20/30
 - 36s - loss: 0.9951 - acc: 0.6005 - val_loss: 1.1235 - val_acc: 0.5625

Epoch 00020: val_loss did not improve from 1.04384
Epoch 21/30
 - 37s - loss: 0.9463 - acc: 0.6186 - val_loss: 1.0638 - val_acc: 0.5625

Epoch 00021: val_loss did not improve from 1.04384
Epoch 22/30
 - 35s - loss: 0.9071 - acc: 0.6340 - val_loss: 1.0788 - val_acc: 0.5417

Epoch 00022: val_loss did not improve from 1.04384
Epoch 23/30
 - 36s - loss: 0.9354 - acc: 0.6237 - val_loss: 1.5939 - val_acc: 0.1250

Epoch 00023: val_loss did not improve from 1.04384
Epoch 24/30
 - 36s - loss: 0.9177 - acc: 0.6237 - val_loss: 1.0893 - val_acc: 0.5208

Epoch 00024: val_loss did not improve from 1.04384

Epoch 00024: ReduceLROnPlateau reducing learning rate to 0.0006400000303983689.
Epoch 25/30
 - 35s - loss: 0.8899 - acc: 0.6314 - val_loss: 1.0142 - val_acc: 0.5625

Epoch 00025: val_loss improved from 1.04384 to 1.01420, saving model to weights.best.decor.gray_cc_model.hdf5
Epoch 26/30
 - 35s - loss: 0.8687 - acc: 0.6366 - val_loss: 1.0911 - val_acc: 0.5208

Epoch 00026: val_loss did not improve from 1.01420
Epoch 27/30
 - 36s - loss: 0.8620 - acc: 0.6624 - val_loss: 1.0323 - val_acc: 0.5625

Epoch 00027: val_loss did not improve from 1.01420
Epoch 28/30
 - 36s - loss: 0.8355 - acc: 0.6572 - val_loss: 1.1610 - val_acc: 0.5000

Epoch 00028: val_loss did not improve from 1.01420
Epoch 29/30
 - 37s - loss: 0.7904 - acc: 0.6856 - val_loss: 1.0329 - val_acc: 0.5625

Epoch 00029: val_loss did not improve from 1.01420
Epoch 30/30
 - 36s - loss: 0.8379 - acc: 0.6753 - val_loss: 1.0093 - val_acc: 0.5833

Epoch 00030: val_loss improved from 1.01420 to 1.00932, saving model to weights.best.decor.gray_cc_model.hdf5
In [59]:
hide_code
# Plot the training history
history_plot(gray_cc_history)
In [60]:
hide_code
# Load the model with the best validation accuracy
gray_cc_model.load_weights('weights.best.decor.gray_cc_model.hdf5')
# Calculate classification accuracy on the testing set
gray_cc_score = gray_cc_model.evaluate(x_test2, y_test2)
gray_cc_score
49/49 [==============================] - 2s 33ms/step
Out[60]:
[1.2541238872372373, 0.4489796015681053]
In [61]:
hide_code
def gray_cd_model(leaky_alpha):
    model = Sequential()
    
    # TODO: Define a model architecture
    model.add(Conv2D(32, (5, 5), padding='same', input_shape=x_train4.shape[1:]))
    model.add(LeakyReLU(alpha=leaky_alpha))    
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(96, (5, 5)))
    model.add(LeakyReLU(alpha=leaky_alpha))    
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    
    model.add(GlobalMaxPooling2D()) 
    
    model.add(Dense(512, activation='tanh'))
    model.add(Dropout(0.5))   

    model.add(Dense(7))
    model.add(Activation('softmax'))
    
    # TODO: Compile the model
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model
In [62]:
hide_code
# Train the model
gray_cd_model = gray_cd_model(0.025)
gray_cd_checkpointer = ModelCheckpoint(filepath='weights.best.decor.gray_cd_model.hdf5', 
                                       verbose=2, save_best_only=True)
gray_cd_lr_reduction = ReduceLROnPlateau(monitor='val_loss', 
                                         patience=5, verbose=2, factor=0.5)
gray_cd_history = gray_cd_model.fit(x_train4, y_train4, 
                                    epochs=30, batch_size=16, verbose=2,
                                    validation_data=(x_valid4, y_valid4),
                                    callbacks=[gray_cd_checkpointer, gray_cd_lr_reduction])
Train on 388 samples, validate on 48 samples
Epoch 1/30
 - 70s - loss: 2.0094 - acc: 0.1804 - val_loss: 1.9319 - val_acc: 0.1875

Epoch 00001: val_loss improved from inf to 1.93191, saving model to weights.best.decor.gray_cd_model.hdf5
Epoch 2/30
 - 37s - loss: 1.8980 - acc: 0.1856 - val_loss: 1.9241 - val_acc: 0.1875

Epoch 00002: val_loss improved from 1.93191 to 1.92406, saving model to weights.best.decor.gray_cd_model.hdf5
Epoch 3/30
 - 38s - loss: 1.8785 - acc: 0.2010 - val_loss: 1.8987 - val_acc: 0.2500

Epoch 00003: val_loss improved from 1.92406 to 1.89869, saving model to weights.best.decor.gray_cd_model.hdf5
Epoch 4/30
 - 43s - loss: 1.8612 - acc: 0.2191 - val_loss: 1.8989 - val_acc: 0.2500

Epoch 00004: val_loss did not improve from 1.89869
Epoch 5/30
 - 43s - loss: 1.8218 - acc: 0.2371 - val_loss: 1.9066 - val_acc: 0.2292

Epoch 00005: val_loss did not improve from 1.89869
Epoch 6/30
 - 38s - loss: 1.7991 - acc: 0.2500 - val_loss: 1.9225 - val_acc: 0.1875

Epoch 00006: val_loss did not improve from 1.89869
Epoch 7/30
 - 44s - loss: 1.7968 - acc: 0.2629 - val_loss: 1.9115 - val_acc: 0.2083

Epoch 00007: val_loss did not improve from 1.89869
Epoch 8/30
 - 42s - loss: 1.7379 - acc: 0.3144 - val_loss: 1.8219 - val_acc: 0.2500

Epoch 00008: val_loss improved from 1.89869 to 1.82190, saving model to weights.best.decor.gray_cd_model.hdf5
Epoch 9/30
 - 42s - loss: 1.7120 - acc: 0.3015 - val_loss: 1.8575 - val_acc: 0.2292

Epoch 00009: val_loss did not improve from 1.82190
Epoch 10/30
 - 49s - loss: 1.7128 - acc: 0.3479 - val_loss: 1.8928 - val_acc: 0.2708

Epoch 00010: val_loss did not improve from 1.82190
Epoch 11/30
 - 48s - loss: 1.6588 - acc: 0.3531 - val_loss: 1.9037 - val_acc: 0.1875

Epoch 00011: val_loss did not improve from 1.82190
Epoch 12/30
 - 42s - loss: 1.5828 - acc: 0.4175 - val_loss: 1.7708 - val_acc: 0.2708

Epoch 00012: val_loss improved from 1.82190 to 1.77082, saving model to weights.best.decor.gray_cd_model.hdf5
Epoch 13/30
 - 59s - loss: 1.5653 - acc: 0.3995 - val_loss: 1.8196 - val_acc: 0.2500

Epoch 00013: val_loss did not improve from 1.77082
Epoch 14/30
 - 49s - loss: 1.5263 - acc: 0.4227 - val_loss: 1.8404 - val_acc: 0.2292

Epoch 00014: val_loss did not improve from 1.77082
Epoch 15/30
 - 44s - loss: 1.5086 - acc: 0.4149 - val_loss: 1.8089 - val_acc: 0.3125

Epoch 00015: val_loss did not improve from 1.77082
Epoch 16/30
 - 42s - loss: 1.4358 - acc: 0.4381 - val_loss: 2.0256 - val_acc: 0.2083

Epoch 00016: val_loss did not improve from 1.77082
Epoch 17/30
 - 42s - loss: 1.4044 - acc: 0.4768 - val_loss: 1.7436 - val_acc: 0.3542

Epoch 00017: val_loss improved from 1.77082 to 1.74360, saving model to weights.best.decor.gray_cd_model.hdf5
Epoch 18/30
 - 40s - loss: 1.3597 - acc: 0.4742 - val_loss: 1.7323 - val_acc: 0.2917

Epoch 00018: val_loss improved from 1.74360 to 1.73234, saving model to weights.best.decor.gray_cd_model.hdf5
Epoch 19/30
 - 40s - loss: 1.2979 - acc: 0.5077 - val_loss: 1.7711 - val_acc: 0.2708

Epoch 00019: val_loss did not improve from 1.73234
Epoch 20/30
 - 45s - loss: 1.2818 - acc: 0.5077 - val_loss: 1.8414 - val_acc: 0.2292

Epoch 00020: val_loss did not improve from 1.73234
Epoch 21/30
 - 42s - loss: 1.2224 - acc: 0.5438 - val_loss: 1.7560 - val_acc: 0.1875

Epoch 00021: val_loss did not improve from 1.73234
Epoch 22/30
 - 44s - loss: 1.1772 - acc: 0.5722 - val_loss: 1.7652 - val_acc: 0.3542

Epoch 00022: val_loss did not improve from 1.73234
Epoch 23/30
 - 44s - loss: 1.1422 - acc: 0.5619 - val_loss: 1.8032 - val_acc: 0.2500

Epoch 00023: val_loss did not improve from 1.73234

Epoch 00023: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.
Epoch 24/30
 - 39s - loss: 1.0017 - acc: 0.6443 - val_loss: 1.7171 - val_acc: 0.3750

Epoch 00024: val_loss improved from 1.73234 to 1.71712, saving model to weights.best.decor.gray_cd_model.hdf5
Epoch 25/30
 - 43s - loss: 0.9414 - acc: 0.6881 - val_loss: 1.6791 - val_acc: 0.3542

Epoch 00025: val_loss improved from 1.71712 to 1.67914, saving model to weights.best.decor.gray_cd_model.hdf5
Epoch 26/30
 - 43s - loss: 0.9083 - acc: 0.7010 - val_loss: 1.6727 - val_acc: 0.3750

Epoch 00026: val_loss improved from 1.67914 to 1.67272, saving model to weights.best.decor.gray_cd_model.hdf5
Epoch 27/30
 - 46s - loss: 0.8824 - acc: 0.7216 - val_loss: 1.6921 - val_acc: 0.2917

Epoch 00027: val_loss did not improve from 1.67272
Epoch 28/30
 - 47s - loss: 0.8752 - acc: 0.7088 - val_loss: 1.6795 - val_acc: 0.3333

Epoch 00028: val_loss did not improve from 1.67272
Epoch 29/30
 - 61s - loss: 0.8412 - acc: 0.6985 - val_loss: 1.6660 - val_acc: 0.3333

Epoch 00029: val_loss improved from 1.67272 to 1.66595, saving model to weights.best.decor.gray_cd_model.hdf5
Epoch 30/30
 - 54s - loss: 0.8216 - acc: 0.7242 - val_loss: 1.7383 - val_acc: 0.3750

Epoch 00030: val_loss did not improve from 1.66595
In [63]:
hide_code
# Plot the training history
history_plot(gray_cd_history)
In [64]:
hide_code
# Load the model with the best validation accuracy
gray_cd_model.load_weights('weights.best.decor.gray_cd_model.hdf5')
# Calculate classification accuracy on the testing set
gray_cd_score = gray_cd_model.evaluate(x_test4, y_test4)
gray_cd_score
49/49 [==============================] - 5s 104ms/step
Out[64]:
[1.6201995367906532, 0.3265306165023726]
In [61]:
hide_code
# Save previous experiment results for test points / 30 epochs 
# Exp #1: 1st - Conv2D(32, (5, 5)), 2nd - Conv2D(128, (5, 5))
leaky_alphas = np.array([0.005, 0.01, 0.015, 0.02, 0.025])

gray_cd_losses = np.array([1.777957059899155, 1.6141312146673397, 1.6672236432834548, 
                           2.0078208689786949, 1.8407663958413261])
gray_cd_accuracies = np.array([0.26530612260103226, 0.34693877581430943, 0.28571428579031205, 
                               0.224489797438894, 0.30612245050011849])
In [76]:
hide_code
# Save previous experiment results for test points / 30 epochs / 
# Exp #2: 1st - Conv2D(64, (5, 5)), 2nd - Conv2D(256, (5, 5))

gray_cd_losses2 = np.array([1.6528263238011573, 1.7795450176511491, 1.6591916619514933, 
                           1.6295414554829499, 1])
gray_cd_accuracies2 = np.array([0.32653061650237258, 0.20408163417358788, 0.30612245050011849, 
                               0.24489796070420011, 0])
In [77]:
hide_code
# Plot the results
plt.figure(figsize=(18, 6))
plt.plot(gray_cd_losses, leaky_alphas, '-o', color='slategray', label = 'loss')
plt.plot(gray_cd_accuracies, leaky_alphas, '-o', color='#4876ff', label = 'accuracy')
plt.xlabel('results')
plt.ylabel('leaky alphas')
plt.legend()
plt.title('Decor Recognition. Test Results for Grayscaled Images: Loss Function and Accuracy');
In [78]:
hide_code
# Plot the results
plt.figure(figsize=(18, 6))
plt.plot(gray_cd_losses2, leaky_alphas, '-o', color='slategray', label = 'loss')
plt.plot(gray_cd_accuracies2, leaky_alphas, '-o', color='#4876ff', label = 'accuracy')
plt.xlabel('results')
plt.ylabel('leaky alphas')
plt.legend()
plt.title('Decor Recognition Exp #2. Test Results for Grayscaled Images: Loss Function and Accuracy');

Step 5. Create Neural Networks for Multi-Label Classification

In [34]:
hide_code
def multi_model(leaky_alpha):    
    model_input = Input(shape=(150, 150, 3))
    x = BatchNormalization()(model_input)
    # TODO: Define a model architecture
    x = Conv2D(32, (5, 5), padding='same')(model_input)
    x = LeakyReLU(alpha=leaky_alpha)(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)    
    x = Dropout(0.25)(x)
    
    x = Conv2D(128, (5, 5), padding='same')(x)
    x = LeakyReLU(alpha=leaky_alpha)(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)    
    x = Dropout(0.25)(x)
              
    x = GlobalMaxPooling2D()(x)
    
    x = Dense(512)(x) 
    x = LeakyReLU(alpha=leaky_alpha)(x)
    x = Dropout(0.25)(x)
    
    y1 = Dense(4, activation='softmax')(x)
    y2 = Dense(7, activation='softmax')(x)
    y3 = Dense(2, activation='softmax')(x)
    
    model = Model(inputs=model_input, outputs=[y1, y2, y3])
    
    # TODO: Compile the model
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    
    return model
In [259]:
hide_code
# Train the model
multi_model = multi_model(0.005)
multi_checkpointer = ModelCheckpoint(filepath='weights.best.decor.multi_model.hdf5', 
                                     verbose=2, save_best_only=True)
multi_history = multi_model.fit(x_train5, y_train5_list, 
                                epochs=20, batch_size=16, verbose=2,
                                validation_data=(x_valid5, y_valid5_list),
                                callbacks=[multi_checkpointer])
Train on 388 samples, validate on 48 samples
Epoch 1/20
Epoch 00000: val_loss improved from inf to 3.69059, saving model to weights.best.decor.multi_model.hdf5
125s - loss: 3.8243 - dense_104_loss: 1.1645 - dense_105_loss: 1.9464 - dense_106_loss: 0.7133 - dense_104_acc: 0.5644 - dense_105_acc: 0.2294 - dense_106_acc: 0.5412 - val_loss: 3.6906 - val_dense_104_loss: 1.1744 - val_dense_105_loss: 1.8285 - val_dense_106_loss: 0.6877 - val_dense_104_acc: 0.4375 - val_dense_105_acc: 0.2500 - val_dense_106_acc: 0.5208
Epoch 2/20
Epoch 00001: val_loss improved from 3.69059 to 3.44568, saving model to weights.best.decor.multi_model.hdf5
93s - loss: 3.4733 - dense_104_loss: 1.1045 - dense_105_loss: 1.6296 - dense_106_loss: 0.7391 - dense_104_acc: 0.5335 - dense_105_acc: 0.3582 - dense_106_acc: 0.4974 - val_loss: 3.4457 - val_dense_104_loss: 1.0960 - val_dense_105_loss: 1.6365 - val_dense_106_loss: 0.7132 - val_dense_104_acc: 0.6042 - val_dense_105_acc: 0.4167 - val_dense_106_acc: 0.4792
Epoch 3/20
Epoch 00002: val_loss improved from 3.44568 to 2.99757, saving model to weights.best.decor.multi_model.hdf5
83s - loss: 3.2240 - dense_104_loss: 1.0125 - dense_105_loss: 1.4062 - dense_106_loss: 0.8053 - dense_104_acc: 0.5567 - dense_105_acc: 0.4278 - dense_106_acc: 0.4897 - val_loss: 2.9976 - val_dense_104_loss: 0.9594 - val_dense_105_loss: 1.3465 - val_dense_106_loss: 0.6917 - val_dense_104_acc: 0.5625 - val_dense_105_acc: 0.4583 - val_dense_106_acc: 0.6042
Epoch 4/20
Epoch 00003: val_loss did not improve
86s - loss: 2.7793 - dense_104_loss: 0.8227 - dense_105_loss: 1.2057 - dense_106_loss: 0.7509 - dense_104_acc: 0.6418 - dense_105_acc: 0.5180 - dense_106_acc: 0.5284 - val_loss: 3.2438 - val_dense_104_loss: 0.9545 - val_dense_105_loss: 1.4619 - val_dense_106_loss: 0.8274 - val_dense_104_acc: 0.6250 - val_dense_105_acc: 0.3542 - val_dense_106_acc: 0.5208
Epoch 5/20
Epoch 00004: val_loss did not improve
81s - loss: 2.5284 - dense_104_loss: 0.6891 - dense_105_loss: 1.0347 - dense_106_loss: 0.8046 - dense_104_acc: 0.7216 - dense_105_acc: 0.6108 - dense_106_acc: 0.5567 - val_loss: 3.4120 - val_dense_104_loss: 0.9442 - val_dense_105_loss: 1.3694 - val_dense_106_loss: 1.0984 - val_dense_104_acc: 0.5833 - val_dense_105_acc: 0.4792 - val_dense_106_acc: 0.4792
Epoch 6/20
Epoch 00005: val_loss improved from 2.99757 to 2.57408, saving model to weights.best.decor.multi_model.hdf5
77s - loss: 2.8380 - dense_104_loss: 0.8745 - dense_105_loss: 1.1120 - dense_106_loss: 0.8515 - dense_104_acc: 0.7062 - dense_105_acc: 0.6314 - dense_106_acc: 0.5979 - val_loss: 2.5741 - val_dense_104_loss: 0.7432 - val_dense_105_loss: 1.2040 - val_dense_106_loss: 0.6269 - val_dense_104_acc: 0.7708 - val_dense_105_acc: 0.6667 - val_dense_106_acc: 0.7292
Epoch 7/20
Epoch 00006: val_loss did not improve
84s - loss: 2.3845 - dense_104_loss: 0.6799 - dense_105_loss: 0.9117 - dense_106_loss: 0.7929 - dense_104_acc: 0.7345 - dense_105_acc: 0.6933 - dense_106_acc: 0.5902 - val_loss: 5.9925 - val_dense_104_loss: 2.3594 - val_dense_105_loss: 1.9447 - val_dense_106_loss: 1.6883 - val_dense_104_acc: 0.5833 - val_dense_105_acc: 0.4167 - val_dense_106_acc: 0.4792
Epoch 8/20
Epoch 00007: val_loss improved from 2.57408 to 2.13224, saving model to weights.best.decor.multi_model.hdf5
78s - loss: 2.2541 - dense_104_loss: 0.6833 - dense_105_loss: 0.8591 - dense_106_loss: 0.7117 - dense_104_acc: 0.7964 - dense_105_acc: 0.7139 - dense_106_acc: 0.6186 - val_loss: 2.1322 - val_dense_104_loss: 0.6128 - val_dense_105_loss: 0.8292 - val_dense_106_loss: 0.6902 - val_dense_104_acc: 0.7917 - val_dense_105_acc: 0.7500 - val_dense_106_acc: 0.6250
Epoch 9/20
Epoch 00008: val_loss did not improve
75s - loss: 1.9156 - dense_104_loss: 0.4512 - dense_105_loss: 0.7561 - dense_106_loss: 0.7083 - dense_104_acc: 0.8144 - dense_105_acc: 0.7320 - dense_106_acc: 0.6366 - val_loss: 7.3890 - val_dense_104_loss: 2.6436 - val_dense_105_loss: 3.0160 - val_dense_106_loss: 1.7294 - val_dense_104_acc: 0.2917 - val_dense_105_acc: 0.3958 - val_dense_106_acc: 0.4792
Epoch 10/20
Epoch 00009: val_loss did not improve
81s - loss: 2.8636 - dense_104_loss: 0.9291 - dense_105_loss: 1.1442 - dense_106_loss: 0.7904 - dense_104_acc: 0.7345 - dense_105_acc: 0.6830 - dense_106_acc: 0.6160 - val_loss: 2.3807 - val_dense_104_loss: 0.5198 - val_dense_105_loss: 0.7758 - val_dense_106_loss: 1.0851 - val_dense_104_acc: 0.8125 - val_dense_105_acc: 0.7500 - val_dense_106_acc: 0.5417
Epoch 11/20
Epoch 00010: val_loss improved from 2.13224 to 1.83151, saving model to weights.best.decor.multi_model.hdf5
72s - loss: 2.0308 - dense_104_loss: 0.4455 - dense_105_loss: 0.7333 - dense_106_loss: 0.8520 - dense_104_acc: 0.8325 - dense_105_acc: 0.7629 - dense_106_acc: 0.6289 - val_loss: 1.8315 - val_dense_104_loss: 0.5108 - val_dense_105_loss: 0.6841 - val_dense_106_loss: 0.6366 - val_dense_104_acc: 0.7917 - val_dense_105_acc: 0.7500 - val_dense_106_acc: 0.6458
Epoch 12/20
Epoch 00011: val_loss did not improve
70s - loss: 3.9423 - dense_104_loss: 1.0871 - dense_105_loss: 1.5830 - dense_106_loss: 1.2722 - dense_104_acc: 0.7912 - dense_105_acc: 0.7062 - dense_106_acc: 0.6495 - val_loss: 13.1927 - val_dense_104_loss: 4.8810 - val_dense_105_loss: 6.2934 - val_dense_106_loss: 2.0182 - val_dense_104_acc: 0.5625 - val_dense_105_acc: 0.4583 - val_dense_106_acc: 0.5417
Epoch 13/20
Epoch 00012: val_loss did not improve
75s - loss: 22.2251 - dense_104_loss: 6.4060 - dense_105_loss: 9.6591 - dense_106_loss: 6.1600 - dense_104_acc: 0.5541 - dense_105_acc: 0.3454 - dense_106_acc: 0.5644 - val_loss: 28.4840 - val_dense_104_loss: 7.3875 - val_dense_105_loss: 13.3732 - val_dense_106_loss: 7.7233 - val_dense_104_acc: 0.5417 - val_dense_105_acc: 0.1667 - val_dense_106_acc: 0.5208
Epoch 14/20
Epoch 00013: val_loss did not improve
75s - loss: 27.6251 - dense_104_loss: 6.6466 - dense_105_loss: 12.8779 - dense_106_loss: 8.1006 - dense_104_acc: 0.5876 - dense_105_acc: 0.2010 - dense_106_acc: 0.4974 - val_loss: 28.5425 - val_dense_104_loss: 7.3875 - val_dense_105_loss: 13.4317 - val_dense_106_loss: 7.7233 - val_dense_104_acc: 0.5417 - val_dense_105_acc: 0.1667 - val_dense_106_acc: 0.5208
Epoch 15/20
Epoch 00014: val_loss did not improve
76s - loss: 29.2082 - dense_104_loss: 6.6466 - dense_105_loss: 14.4609 - dense_106_loss: 8.1006 - dense_104_acc: 0.5876 - dense_105_acc: 0.1005 - dense_106_acc: 0.4974 - val_loss: 28.5425 - val_dense_104_loss: 7.3875 - val_dense_105_loss: 13.4317 - val_dense_106_loss: 7.7233 - val_dense_104_acc: 0.5417 - val_dense_105_acc: 0.1667 - val_dense_106_acc: 0.5208
Epoch 16/20
Epoch 00015: val_loss did not improve
80s - loss: 29.2452 - dense_104_loss: 6.6466 - dense_105_loss: 14.4980 - dense_106_loss: 8.1006 - dense_104_acc: 0.5876 - dense_105_acc: 0.1005 - dense_106_acc: 0.4974 - val_loss: 28.5425 - val_dense_104_loss: 7.3875 - val_dense_105_loss: 13.4317 - val_dense_106_loss: 7.7233 - val_dense_104_acc: 0.5417 - val_dense_105_acc: 0.1667 - val_dense_106_acc: 0.5208
Epoch 17/20
Epoch 00016: val_loss did not improve
73s - loss: 29.2452 - dense_104_loss: 6.6466 - dense_105_loss: 14.4980 - dense_106_loss: 8.1006 - dense_104_acc: 0.5876 - dense_105_acc: 0.1005 - dense_106_acc: 0.4974 - val_loss: 28.5425 - val_dense_104_loss: 7.3875 - val_dense_105_loss: 13.4317 - val_dense_106_loss: 7.7233 - val_dense_104_acc: 0.5417 - val_dense_105_acc: 0.1667 - val_dense_106_acc: 0.5208
Epoch 18/20
Epoch 00017: val_loss did not improve
82s - loss: 29.2452 - dense_104_loss: 6.6466 - dense_105_loss: 14.4980 - dense_106_loss: 8.1006 - dense_104_acc: 0.5876 - dense_105_acc: 0.1005 - dense_106_acc: 0.4974 - val_loss: 28.5425 - val_dense_104_loss: 7.3875 - val_dense_105_loss: 13.4317 - val_dense_106_loss: 7.7233 - val_dense_104_acc: 0.5417 - val_dense_105_acc: 0.1667 - val_dense_106_acc: 0.5208
Epoch 19/20
Epoch 00018: val_loss did not improve
81s - loss: 29.2452 - dense_104_loss: 6.6466 - dense_105_loss: 14.4980 - dense_106_loss: 8.1006 - dense_104_acc: 0.5876 - dense_105_acc: 0.1005 - dense_106_acc: 0.4974 - val_loss: 28.5425 - val_dense_104_loss: 7.3875 - val_dense_105_loss: 13.4317 - val_dense_106_loss: 7.7233 - val_dense_104_acc: 0.5417 - val_dense_105_acc: 0.1667 - val_dense_106_acc: 0.5208
Epoch 20/20
Epoch 00019: val_loss did not improve
77s - loss: 29.2452 - dense_104_loss: 6.6466 - dense_105_loss: 14.4980 - dense_106_loss: 8.1006 - dense_104_acc: 0.5876 - dense_105_acc: 0.1005 - dense_106_acc: 0.4974 - val_loss: 28.5425 - val_dense_104_loss: 7.3875 - val_dense_105_loss: 13.4317 - val_dense_106_loss: 7.7233 - val_dense_104_acc: 0.5417 - val_dense_105_acc: 0.1667 - val_dense_106_acc: 0.5208
In [260]:
hide_code
# Load the model with the best validation accuracy
multi_model.load_weights('weights.best.decor.multi_model.hdf5')
# Calculate classification accuracy on the testing set
multi_scores = multi_model.evaluate(x_test5, y_test5_list, verbose=0)

print("Scores: \n" , (multi_scores))
print("Country label. Accuracy: %.2f%%" % (multi_scores[4]*100))
print("Decor label. Accuracy: %.2f%%" % (multi_scores[5]*100))
print("Type label. Accuracy: %.2f%%" % (multi_scores[6]*100))
Scores: 
 [2.3588784957418638, 0.79491684874709767, 0.82905603671560479, 0.734905651637486, 0.77551021138016063, 0.75510204933127578, 0.59183673651850954]
Country label. Accuracy: 77.55%
Decor label. Accuracy: 75.51%
Type label. Accuracy: 59.18%
In [41]:
hide_code
# Save previous experiment results for test points / 20 epochs
leaky_alphas = np.array([0.005, 0.01, 0.015, 0.02, 0.025])

multi_losses = np.array([2.3588784957418638, 2.4497200274954039, 3.1632251155619717, 
                         2.7588427845312626, 2.400767063607975])
multi_losses1 = np.array([0.79491684874709767, 0.78207657531816133, 0.84956139447737711, 
                          0.926974564182515, 0.7789653004432211])
multi_losses2 = np.array([0.82905603671560479, 0.98002039899631421, 1.0824525891518106, 
                          1.1792272621271562, 0.98129088294749356])
multi_losses3 = np.array([0.734905651637486, 0.68762307386009058, 1.2312110930073017, 
                          0.65264103850539845, 0.64051082304545814])

multi_accuracies1 = np.array([0.77551021138016063, 0.75510204933127578, 0.65306122509800657, 
                              0.67346939627005131, 0.67346939627005131])
multi_accuracies2 = np.array([0.75510204933127578, 0.63265306852301773, 0.55102041789463585, 
                              0.5714285787270994, 0.61224490525771158])
multi_accuracies3 = np.array([0.59183673651850954, 0.67346939748647261, 0.32653061376542464, 
                              0.59183674199240544, 0.67346939383720861])
In [42]:
hide_code
# Plot the results
plt.figure(figsize=(18, 6))
plt.plot(leaky_alphas, multi_losses2, '-o', color='slategray', label = 'loss')
plt.plot(leaky_alphas, multi_accuracies2, '-o', color='#4876ff', label = 'accuracy')
plt.xlabel('leaky alphas')
plt.ylabel('results')
plt.legend()
plt.title('Decor Recognition. Multi-Label Model. Test Results for Color Images: Loss Function and Accuracy');
In [36]:
hide_code
def gray_multi_model(leaky_alpha):    
    model_input = Input(shape=(150, 150, 1))
    x = BatchNormalization()(model_input)
    # TODO: Define a model architecture
    x = Conv2D(32, (5, 5), padding='same')(model_input)
    x = LeakyReLU(alpha=leaky_alpha)(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)    
    x = Dropout(0.25)(x)
    
    x = Conv2D(256, (5, 5), padding='same')(x)
    x = LeakyReLU(alpha=leaky_alpha)(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)    
    x = Dropout(0.25)(x)
              
    x = GlobalMaxPooling2D()(x)
    
    x = Dense(2048)(x) 
    x = LeakyReLU(alpha=leaky_alpha)(x)
    x = Dropout(0.25)(x)
    
    y1 = Dense(4, activation='softmax')(x)
    y2 = Dense(7, activation='softmax')(x)
    y3 = Dense(2, activation='softmax')(x)
    
    model = Model(inputs=model_input, outputs=[y1, y2, y3])
    
    # TODO: Compile the model
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    
    return model
In [37]:
hide_code
# Train the model
gray_multi_model = gray_multi_model(0.005)
gray_multi_checkpointer = ModelCheckpoint(filepath='weights.best.decor.gray_multi_model.hdf5', 
                                          verbose=2, save_best_only=True)
gray_multi_history = gray_multi_model.fit(x_train6, y_train6_list, 
                                          epochs=20, batch_size=16, verbose=2,
                                          validation_data=(x_valid6, y_valid6_list),
                                          callbacks=[gray_multi_checkpointer])
Train on 388 samples, validate on 48 samples
Epoch 1/20
Epoch 00000: val_loss improved from inf to 3.75423, saving model to weights.best.decor.gray_multi_model.hdf5
138s - loss: 4.0302 - dense_4_loss: 1.1822 - dense_5_loss: 1.9907 - dense_6_loss: 0.8574 - dense_4_acc: 0.5722 - dense_5_acc: 0.2062 - dense_6_acc: 0.5129 - val_loss: 3.7542 - val_dense_4_loss: 1.1282 - val_dense_5_loss: 1.9335 - val_dense_6_loss: 0.6925 - val_dense_4_acc: 0.5417 - val_dense_5_acc: 0.1875 - val_dense_6_acc: 0.4792
Epoch 2/20
Epoch 00001: val_loss did not improve
136s - loss: 3.8064 - dense_4_loss: 1.1418 - dense_5_loss: 1.9399 - dense_6_loss: 0.7247 - dense_4_acc: 0.5876 - dense_5_acc: 0.2165 - dense_6_acc: 0.4691 - val_loss: 3.7709 - val_dense_4_loss: 1.1595 - val_dense_5_loss: 1.9204 - val_dense_6_loss: 0.6909 - val_dense_4_acc: 0.5417 - val_dense_5_acc: 0.1667 - val_dense_6_acc: 0.4792
Epoch 3/20
Epoch 00002: val_loss did not improve
127s - loss: 3.7253 - dense_4_loss: 1.1218 - dense_5_loss: 1.9234 - dense_6_loss: 0.6800 - dense_4_acc: 0.5696 - dense_5_acc: 0.1907 - dense_6_acc: 0.5747 - val_loss: 3.7736 - val_dense_4_loss: 1.1210 - val_dense_5_loss: 1.9211 - val_dense_6_loss: 0.7316 - val_dense_4_acc: 0.5417 - val_dense_5_acc: 0.1875 - val_dense_6_acc: 0.5208
Epoch 4/20
Epoch 00003: val_loss did not improve
125s - loss: 3.8152 - dense_4_loss: 1.1436 - dense_5_loss: 1.9297 - dense_6_loss: 0.7419 - dense_4_acc: 0.5799 - dense_5_acc: 0.1881 - dense_6_acc: 0.5361 - val_loss: 3.9425 - val_dense_4_loss: 1.1553 - val_dense_5_loss: 2.0360 - val_dense_6_loss: 0.7511 - val_dense_4_acc: 0.5417 - val_dense_5_acc: 0.1875 - val_dense_6_acc: 0.5208
Epoch 5/20
Epoch 00004: val_loss did not improve
125s - loss: 4.0058 - dense_4_loss: 1.2001 - dense_5_loss: 1.9877 - dense_6_loss: 0.8181 - dense_4_acc: 0.5155 - dense_5_acc: 0.2139 - dense_6_acc: 0.5155 - val_loss: 4.0056 - val_dense_4_loss: 1.2015 - val_dense_5_loss: 2.1379 - val_dense_6_loss: 0.6662 - val_dense_4_acc: 0.5417 - val_dense_5_acc: 0.1667 - val_dense_6_acc: 0.5417
Epoch 6/20
Epoch 00005: val_loss did not improve
137s - loss: 3.8791 - dense_4_loss: 1.1599 - dense_5_loss: 1.9761 - dense_6_loss: 0.7432 - dense_4_acc: 0.5567 - dense_5_acc: 0.2242 - dense_6_acc: 0.5722 - val_loss: 3.8749 - val_dense_4_loss: 1.1534 - val_dense_5_loss: 2.0082 - val_dense_6_loss: 0.7133 - val_dense_4_acc: 0.5625 - val_dense_5_acc: 0.2083 - val_dense_6_acc: 0.5417
Epoch 7/20
Epoch 00006: val_loss did not improve
128s - loss: 3.9752 - dense_4_loss: 1.1886 - dense_5_loss: 2.0393 - dense_6_loss: 0.7474 - dense_4_acc: 0.5155 - dense_5_acc: 0.1804 - dense_6_acc: 0.5954 - val_loss: 3.9773 - val_dense_4_loss: 1.3172 - val_dense_5_loss: 1.9843 - val_dense_6_loss: 0.6759 - val_dense_4_acc: 0.5417 - val_dense_5_acc: 0.1875 - val_dense_6_acc: 0.5208
Epoch 8/20
Epoch 00007: val_loss did not improve
125s - loss: 4.6348 - dense_4_loss: 1.3668 - dense_5_loss: 2.2197 - dense_6_loss: 1.0484 - dense_4_acc: 0.4897 - dense_5_acc: 0.1830 - dense_6_acc: 0.5490 - val_loss: 4.2362 - val_dense_4_loss: 1.1395 - val_dense_5_loss: 2.2001 - val_dense_6_loss: 0.8966 - val_dense_4_acc: 0.5417 - val_dense_5_acc: 0.0833 - val_dense_6_acc: 0.5208
Epoch 9/20
Epoch 00008: val_loss did not improve
128s - loss: 4.6082 - dense_4_loss: 1.4069 - dense_5_loss: 2.3700 - dense_6_loss: 0.8313 - dense_4_acc: 0.4510 - dense_5_acc: 0.1933 - dense_6_acc: 0.5928 - val_loss: 4.6491 - val_dense_4_loss: 1.3782 - val_dense_5_loss: 1.9379 - val_dense_6_loss: 1.3330 - val_dense_4_acc: 0.2500 - val_dense_5_acc: 0.1875 - val_dense_6_acc: 0.5208
Epoch 10/20
Epoch 00009: val_loss did not improve
130s - loss: 5.0768 - dense_4_loss: 1.4213 - dense_5_loss: 2.3807 - dense_6_loss: 1.2748 - dense_4_acc: 0.4768 - dense_5_acc: 0.1933 - dense_6_acc: 0.4742 - val_loss: 4.5775 - val_dense_4_loss: 1.4303 - val_dense_5_loss: 2.3392 - val_dense_6_loss: 0.8080 - val_dense_4_acc: 0.5417 - val_dense_5_acc: 0.1667 - val_dense_6_acc: 0.5208
Epoch 11/20
Epoch 00010: val_loss did not improve
146s - loss: 5.5052 - dense_4_loss: 1.5319 - dense_5_loss: 2.7327 - dense_6_loss: 1.2405 - dense_4_acc: 0.4871 - dense_5_acc: 0.1881 - dense_6_acc: 0.5258 - val_loss: 8.1273 - val_dense_4_loss: 2.4061 - val_dense_5_loss: 4.2715 - val_dense_6_loss: 1.4497 - val_dense_4_acc: 0.0833 - val_dense_5_acc: 0.0833 - val_dense_6_acc: 0.4792
Epoch 12/20
Epoch 00011: val_loss did not improve
162s - loss: 6.5809 - dense_4_loss: 2.0081 - dense_5_loss: 3.2159 - dense_6_loss: 1.3569 - dense_4_acc: 0.4536 - dense_5_acc: 0.1985 - dense_6_acc: 0.5490 - val_loss: 4.8207 - val_dense_4_loss: 1.5429 - val_dense_5_loss: 2.6096 - val_dense_6_loss: 0.6682 - val_dense_4_acc: 0.5417 - val_dense_5_acc: 0.2083 - val_dense_6_acc: 0.5208
Epoch 13/20
Epoch 00012: val_loss did not improve
158s - loss: 7.0534 - dense_4_loss: 2.2762 - dense_5_loss: 3.4746 - dense_6_loss: 1.3026 - dense_4_acc: 0.4304 - dense_5_acc: 0.2010 - dense_6_acc: 0.5876 - val_loss: 5.7994 - val_dense_4_loss: 2.0686 - val_dense_5_loss: 3.1149 - val_dense_6_loss: 0.6159 - val_dense_4_acc: 0.1667 - val_dense_5_acc: 0.1667 - val_dense_6_acc: 0.6042
Epoch 14/20
Epoch 00013: val_loss did not improve
153s - loss: 5.8101 - dense_4_loss: 1.6131 - dense_5_loss: 2.8927 - dense_6_loss: 1.3043 - dense_4_acc: 0.5155 - dense_5_acc: 0.2706 - dense_6_acc: 0.5902 - val_loss: 4.3816 - val_dense_4_loss: 1.5038 - val_dense_5_loss: 2.1673 - val_dense_6_loss: 0.7104 - val_dense_4_acc: 0.5208 - val_dense_5_acc: 0.2083 - val_dense_6_acc: 0.5000
Epoch 15/20
Epoch 00014: val_loss did not improve
121s - loss: 16.6632 - dense_4_loss: 4.9403 - dense_5_loss: 7.2972 - dense_6_loss: 4.4256 - dense_4_acc: 0.4588 - dense_5_acc: 0.1521 - dense_6_acc: 0.4768 - val_loss: 27.3180 - val_dense_4_loss: 7.3875 - val_dense_5_loss: 12.2751 - val_dense_6_loss: 7.6554 - val_dense_4_acc: 0.5417 - val_dense_5_acc: 0.0833 - val_dense_6_acc: 0.5208
Epoch 16/20
Epoch 00015: val_loss did not improve
135s - loss: 27.2739 - dense_4_loss: 6.6434 - dense_5_loss: 12.5323 - dense_6_loss: 8.0982 - dense_4_acc: 0.5876 - dense_5_acc: 0.1856 - dense_6_acc: 0.4974 - val_loss: 31.3049 - val_dense_4_loss: 12.0858 - val_dense_5_loss: 13.0960 - val_dense_6_loss: 6.1231 - val_dense_4_acc: 0.2500 - val_dense_5_acc: 0.1875 - val_dense_6_acc: 0.4792
Epoch 17/20
Epoch 00016: val_loss did not improve
150s - loss: 33.3298 - dense_4_loss: 12.3481 - dense_5_loss: 13.0856 - dense_6_loss: 7.8961 - dense_4_acc: 0.2320 - dense_5_acc: 0.1881 - dense_6_acc: 0.5000 - val_loss: 33.5794 - val_dense_4_loss: 12.0886 - val_dense_5_loss: 13.0960 - val_dense_6_loss: 8.3948 - val_dense_4_acc: 0.2500 - val_dense_5_acc: 0.1875 - val_dense_6_acc: 0.4792
Epoch 18/20
Epoch 00017: val_loss did not improve
139s - loss: 33.6901 - dense_4_loss: 12.5871 - dense_5_loss: 13.0856 - dense_6_loss: 8.0175 - dense_4_acc: 0.2191 - dense_5_acc: 0.1881 - dense_6_acc: 0.5026 - val_loss: 33.5794 - val_dense_4_loss: 12.0886 - val_dense_5_loss: 13.0960 - val_dense_6_loss: 8.3948 - val_dense_4_acc: 0.2500 - val_dense_5_acc: 0.1875 - val_dense_6_acc: 0.4792
Epoch 19/20
Epoch 00018: val_loss did not improve
135s - loss: 33.6901 - dense_4_loss: 12.5871 - dense_5_loss: 13.0856 - dense_6_loss: 8.0175 - dense_4_acc: 0.2191 - dense_5_acc: 0.1881 - dense_6_acc: 0.5026 - val_loss: 33.5794 - val_dense_4_loss: 12.0886 - val_dense_5_loss: 13.0960 - val_dense_6_loss: 8.3948 - val_dense_4_acc: 0.2500 - val_dense_5_acc: 0.1875 - val_dense_6_acc: 0.4792
Epoch 20/20
Epoch 00019: val_loss did not improve
133s - loss: 33.6901 - dense_4_loss: 12.5871 - dense_5_loss: 13.0856 - dense_6_loss: 8.0175 - dense_4_acc: 0.2191 - dense_5_acc: 0.1881 - dense_6_acc: 0.5026 - val_loss: 33.5794 - val_dense_4_loss: 12.0886 - val_dense_5_loss: 13.0960 - val_dense_6_loss: 8.3948 - val_dense_4_acc: 0.2500 - val_dense_5_acc: 0.1875 - val_dense_6_acc: 0.4792
In [38]:
hide_code
# Load the model with the best validation accuracy
gray_multi_model.load_weights('weights.best.decor.gray_multi_model.hdf5')
# Calculate classification accuracy on the testing set
gray_multi_scores = gray_multi_model.evaluate(x_test6, y_test6_list, verbose=0)

print("Scores: \n" , (gray_multi_scores))
print("Country label. Accuracy: %.2f%%" % (gray_multi_scores[4]*100))
print("Decor label. Accuracy: %.2f%%" % (gray_multi_scores[5]*100))
print("Type label. Accuracy: %.2f%%" % (gray_multi_scores[6]*100))
Scores: 
 [3.9903284724877803, 1.2824076097838732, 2.0234891297865887, 0.68443163438719146, 0.44897960156810529, 0.12244897966786307, 0.67346939140436601]
Country label. Accuracy: 44.90%
Decor label. Accuracy: 12.24%
Type label. Accuracy: 67.35%
In [43]:
hide_code
# Save previous experiment results for test points / 20 epochs
gray_multi_losses = np.array([3.9903284724877803, 3.5339538759114792, 3.9035351130427145, 
                              3.7997477687135035, 4.0967560203707949])
gray_multi_losses1 = np.array([1.2824076097838732, 1.1924295376758187, 1.2683943193785998, 
                               1.2840366728451786, 1.2667128188269479])
gray_multi_losses2 = np.array([2.0234891297865887, 1.7557842196250448, 1.9094033581869942, 
                               1.8746722863644969, 1.9847604109316457])
gray_multi_losses3 = np.array([0.68443163438719146, 0.58574000183416874, 0.72573739655163827, 
                               0.64103872922002048, 0.84528285021684613])
 
gray_multi_accuracies1 = np.array([0.44897960156810529, 0.48979592809871753, 0.44897960156810529, 
                                   0.44897960156810529, 0.44897960156810529])
gray_multi_accuracies2 = np.array([0.12244897966786307, 0.24489796222472676, 0.22448979622247267, 
                                   0.30612244928369714, 0.16326530619847532])
gray_multi_accuracies3 = np.array([0.67346939140436601, 0.65306123300474517, 0.32653061376542464, 
                                   0.67346939140436601, 0.32653061376542464])
In [44]:
hide_code
# Plot the results
plt.figure(figsize=(18, 6))
plt.plot(leaky_alphas, gray_multi_losses2, '-o', color='slategray', label = 'loss')
plt.plot(leaky_alphas, gray_multi_accuracies2, '-o', color='#4876ff', label = 'accuracy')
plt.xlabel('leaky alphas')
plt.ylabel('results')
plt.legend()
plt.title('Decor Recognition. Multi-Label Model. Test Results for Grayscaled Images: Loss Function and Accuracy');