Deep Learning

Practice Projects

P2 Additional: Noise Reduction for Multi-Label Classification

Style and Libraries

In [1]:
%%html
<style>
@import url('https://fonts.googleapis.com/css?family=Orbitron|Roboto');
body {background-color: aliceblue;} 
a {color: #4876ff; font-family: 'Roboto';} 
h1 {color: #348ABD; font-family: 'Orbitron'; text-shadow: 4px 4px 4px #ccc;} 
h2, h3 {color: slategray; font-family: 'Roboto'; text-shadow: 4px 4px 4px #ccc;}
h4 {color: #348ABD; font-family: 'Orbitron';}
span {text-shadow: 4px 4px 4px #ccc;}
div.output_prompt, div.output_area pre {color: slategray;}
div.input_prompt, div.output_subarea {color: #4876ff;}      
div.output_stderr pre {background-color: aliceblue;}  
div.output_stderr {background-color: slategrey;}                        
</style>
<script>
code_show = true; 
function code_display() {
    if (code_show) {
        $('div.input').each(function(id) {
            if (id == 0 || $(this).html().indexOf('hide_code') > -1) {$(this).hide();}
        });
        $('div.output_prompt').css('opacity', 0);
    } else {
        $('div.input').each(function(id) {$(this).show();});
        $('div.output_prompt').css('opacity', 1);
    };
    code_show = !code_show;
} 
$(document).ready(code_display);
</script>
<form action="javascript: code_display()">
<input style="color: #348ABD; background: aliceblue; opacity: 0.8;" \ 
type="submit" value="Click to display or hide code cells">
</form>                 
In [2]:
hide_code = ''
import numpy as np 
import pandas as pd
import math

import tensorflow as tf

from sklearn.model_selection import train_test_split
from keras.utils import to_categorical

import h5py
import cv2

from keras.models import Sequential, load_model, Model
from keras.layers import Input, UpSampling2D
from keras.layers import Dense, LSTM, GlobalAveragePooling1D, GlobalAveragePooling2D
from keras.layers import Activation, Flatten, Dropout, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D

import matplotlib.pylab as plt
from matplotlib import cm
%matplotlib inline
Using TensorFlow backend.

Step 1. Load and Explore the Data

In [3]:
hide_code
# Read the h5 file
f = h5py.File('LetterColorImages_23.h5', 'r')

# List all groups
keys = list(f.keys())
keys
Out[3]:
['backgrounds', 'images', 'labels']
In [4]:
hide_code
# Create tensors and targets
backgrounds = np.array(f[keys[0]])
tensors = np.array(f[keys[1]])
targets = np.array(f[keys[2]])
print ('Tensor shape:', tensors.shape)
print ('Target shape', targets.shape)
print ('Background shape:', backgrounds.shape)
Tensor shape: (12540, 32, 32, 3)
Target shape (12540,)
Background shape: (12540,)
In [5]:
hide_code
# Plot letter images
fig, ax = plt.subplots(figsize=(18, 3), nrows=1, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(5):
    image = tensors[i*2000]/255
    ax[i].imshow(image)

ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.gcf()
ax[2].set_title('Examples of letters', fontsize=25);

Step 2. Preprocess

In [6]:
hide_code
# Normalize the tensors
tensors = tensors.astype('float32')/255
In [7]:
hide_code
# Grayscaled tensors
gray_tensors = np.dot(tensors[...,:3], [0.299, 0.587, 0.114])
gray_tensors = gray_tensors.reshape(-1, 32, 32, 1)
print ('Grayscaled Tensor shape:', gray_tensors.shape)
Grayscaled Tensor shape: (12540, 32, 32, 1)
In [8]:
hide_code
# One-hot encode the targets, started from the zero label
cat_targets = to_categorical(np.array(targets-1), 33)
cat_targets.shape
Out[8]:
(12540, 33)
In [9]:
hide_code
# One-hot encode the background targets
backgrounds = to_categorical(backgrounds-2,2)
backgrounds.shape
Out[9]:
(12540, 2)
In [13]:
hide_code
# Create multi-label targets
back_targets = np.concatenate((cat_targets, backgrounds), axis=1)
back_targets.shape
Out[13]:
(12540, 35)
In [14]:
hide_code
# Split the grayscaled data
x_train, x_test, y_train, y_test = train_test_split(gray_tensors, cat_targets, 
                                                    test_size = 0.2, 
                                                    random_state = 1)

n = int(len(x_test)/2)
x_valid, y_valid = x_test[:n], y_test[:n]
x_test, y_test = x_test[n:], y_test[n:]
In [15]:
hide_code
# Split the grayscaled data
x_train2, x_test2, y_train2, y_test2 = train_test_split(gray_tensors, back_targets, 
                                                        test_size = 0.2, 
                                                        random_state = 1)
n = int(len(x_test2)/2)
x_valid2, y_valid2 = x_test2[:n], y_test2[:n]
x_test2, y_test2 = x_test2[n:], y_test2[n:]

Step 3. Create Neural Networks for Noise Reduction

TensorFlow; Compressed Images

In [26]:
hide_code
# NN for noise reduction

inputs_ = tf.placeholder(tf.float32, (None, 32, 32, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 32, 32, 1), name='targets')

### Encoder
conv1 = tf.layers.conv2d(inputs_, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 32x32x32
maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same')
# Now 16x16x32
conv2 = tf.layers.conv2d(maxpool1, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 16x16x16
maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same')
# Now 8x8x16
conv3 = tf.layers.conv2d(maxpool2, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 8x8x8
encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same')
# Now 4x4x8

### Decoder
upsample1 = tf.image.resize_nearest_neighbor(encoded, (8,8))
# Now 8x8x8
conv4 = tf.layers.conv2d(upsample1, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 8x8x8
upsample2 = tf.image.resize_nearest_neighbor(conv4, (16,16))
# Now 16x16x16
conv5 = tf.layers.conv2d(upsample2, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 16x16x16
upsample3 = tf.image.resize_nearest_neighbor(conv5, (32,32))
# Now 32x32x16
conv6 = tf.layers.conv2d(upsample3, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 32x32x32

logits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None)
# Now 32x32x1

decoded = tf.nn.sigmoid(logits, name='decoded')

loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(0.001).minimize(cost)
In [27]:
hide_code
# Function for creating batches
def get_batches(images, batch_size):
    current_index = 0
    while current_index + batch_size <= images.shape[0]:
        data_batch = images[current_index:current_index + batch_size]
        current_index += batch_size
        yield data_batch    
In [28]:
hide_code
# Define parameters
epochs = 200
batch_size = 64
train_step = 0
In [29]:
hide_code
# Run the tensorflow session 
sess = tf.Session()
sess.run(tf.global_variables_initializer())
gray_tensors1 = np.copy(gray_tensors)

for e in range(epochs):
    for batch_images in get_batches(gray_tensors1, batch_size):
        train_step += 1
        batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: batch_images,
                                                         targets_: batch_images})
        if train_step % 200 == 0:
            print("Epoch: {}/{}...".format(e+1, epochs),
                  "Training loss: {:.4f}".format(batch_cost))
Epoch: 2/200... Training loss: 0.6997
Epoch: 3/200... Training loss: 0.6901
Epoch: 4/200... Training loss: 0.6832
Epoch: 5/200... Training loss: 0.6757
Epoch: 6/200... Training loss: 0.6339
Epoch: 7/200... Training loss: 0.6196
Epoch: 8/200... Training loss: 0.6277
Epoch: 9/200... Training loss: 0.6410
Epoch: 10/200... Training loss: 0.6215
Epoch: 11/200... Training loss: 0.6777
Epoch: 12/200... Training loss: 0.6821
Epoch: 13/200... Training loss: 0.6010
Epoch: 14/200... Training loss: 0.6749
Epoch: 15/200... Training loss: 0.6467
Epoch: 16/200... Training loss: 0.6237
Epoch: 17/200... Training loss: 0.6342
Epoch: 18/200... Training loss: 0.6777
Epoch: 19/200... Training loss: 0.6800
Epoch: 20/200... Training loss: 0.6152
Epoch: 21/200... Training loss: 0.6286
Epoch: 22/200... Training loss: 0.6786
Epoch: 23/200... Training loss: 0.6302
Epoch: 24/200... Training loss: 0.6364
Epoch: 25/200... Training loss: 0.6064
Epoch: 26/200... Training loss: 0.6756
Epoch: 27/200... Training loss: 0.6363
Epoch: 28/200... Training loss: 0.6236
Epoch: 29/200... Training loss: 0.6229
Epoch: 30/200... Training loss: 0.6265
Epoch: 31/200... Training loss: 0.6606
Epoch: 32/200... Training loss: 0.6058
Epoch: 33/200... Training loss: 0.5999
Epoch: 34/200... Training loss: 0.6440
Epoch: 35/200... Training loss: 0.6732
Epoch: 36/200... Training loss: 0.6040
Epoch: 37/200... Training loss: 0.6139
Epoch: 38/200... Training loss: 0.6780
Epoch: 39/200... Training loss: 0.6724
Epoch: 40/200... Training loss: 0.6783
Epoch: 42/200... Training loss: 0.6807
Epoch: 43/200... Training loss: 0.6768
Epoch: 44/200... Training loss: 0.6787
Epoch: 45/200... Training loss: 0.6733
Epoch: 46/200... Training loss: 0.6330
Epoch: 47/200... Training loss: 0.6189
Epoch: 48/200... Training loss: 0.6265
Epoch: 49/200... Training loss: 0.6399
Epoch: 50/200... Training loss: 0.6200
Epoch: 51/200... Training loss: 0.6765
Epoch: 52/200... Training loss: 0.6814
Epoch: 53/200... Training loss: 0.6000
Epoch: 54/200... Training loss: 0.6739
Epoch: 55/200... Training loss: 0.6453
Epoch: 56/200... Training loss: 0.6231
Epoch: 57/200... Training loss: 0.6332
Epoch: 58/200... Training loss: 0.6763
Epoch: 59/200... Training loss: 0.6793
Epoch: 60/200... Training loss: 0.6136
Epoch: 61/200... Training loss: 0.6277
Epoch: 62/200... Training loss: 0.6778
Epoch: 63/200... Training loss: 0.6293
Epoch: 64/200... Training loss: 0.6358
Epoch: 65/200... Training loss: 0.6060
Epoch: 66/200... Training loss: 0.6749
Epoch: 67/200... Training loss: 0.6356
Epoch: 68/200... Training loss: 0.6226
Epoch: 69/200... Training loss: 0.6222
Epoch: 70/200... Training loss: 0.6257
Epoch: 71/200... Training loss: 0.6600
Epoch: 72/200... Training loss: 0.6052
Epoch: 73/200... Training loss: 0.5979
Epoch: 74/200... Training loss: 0.6433
Epoch: 75/200... Training loss: 0.6724
Epoch: 76/200... Training loss: 0.6032
Epoch: 77/200... Training loss: 0.6130
Epoch: 78/200... Training loss: 0.6774
Epoch: 79/200... Training loss: 0.6718
Epoch: 80/200... Training loss: 0.6776
Epoch: 82/200... Training loss: 0.6802
Epoch: 83/200... Training loss: 0.6762
Epoch: 84/200... Training loss: 0.6784
Epoch: 85/200... Training loss: 0.6731
Epoch: 86/200... Training loss: 0.6328
Epoch: 87/200... Training loss: 0.6186
Epoch: 88/200... Training loss: 0.6264
Epoch: 89/200... Training loss: 0.6398
Epoch: 90/200... Training loss: 0.6197
Epoch: 91/200... Training loss: 0.6763
Epoch: 92/200... Training loss: 0.6812
Epoch: 93/200... Training loss: 0.5997
Epoch: 94/200... Training loss: 0.6738
Epoch: 95/200... Training loss: 0.6450
Epoch: 96/200... Training loss: 0.6229
Epoch: 97/200... Training loss: 0.6329
Epoch: 98/200... Training loss: 0.6759
Epoch: 99/200... Training loss: 0.6793
Epoch: 100/200... Training loss: 0.6132
Epoch: 101/200... Training loss: 0.6273
Epoch: 102/200... Training loss: 0.6776
Epoch: 103/200... Training loss: 0.6290
Epoch: 104/200... Training loss: 0.6355
Epoch: 105/200... Training loss: 0.6058
Epoch: 106/200... Training loss: 0.6747
Epoch: 107/200... Training loss: 0.6354
Epoch: 108/200... Training loss: 0.6224
Epoch: 109/200... Training loss: 0.6220
Epoch: 110/200... Training loss: 0.6255
Epoch: 111/200... Training loss: 0.6598
Epoch: 112/200... Training loss: 0.6051
Epoch: 113/200... Training loss: 0.5971
Epoch: 114/200... Training loss: 0.6429
Epoch: 115/200... Training loss: 0.6720
Epoch: 116/200... Training loss: 0.6028
Epoch: 117/200... Training loss: 0.6124
Epoch: 118/200... Training loss: 0.6770
Epoch: 119/200... Training loss: 0.6715
Epoch: 120/200... Training loss: 0.6772
Epoch: 122/200... Training loss: 0.6801
Epoch: 123/200... Training loss: 0.6761
Epoch: 124/200... Training loss: 0.6782
Epoch: 125/200... Training loss: 0.6729
Epoch: 126/200... Training loss: 0.6327
Epoch: 127/200... Training loss: 0.6184
Epoch: 128/200... Training loss: 0.6263
Epoch: 129/200... Training loss: 0.6396
Epoch: 130/200... Training loss: 0.6195
Epoch: 131/200... Training loss: 0.6762
Epoch: 132/200... Training loss: 0.6812
Epoch: 133/200... Training loss: 0.5997
Epoch: 134/200... Training loss: 0.6737
Epoch: 135/200... Training loss: 0.6450
Epoch: 136/200... Training loss: 0.6229
Epoch: 137/200... Training loss: 0.6330
Epoch: 138/200... Training loss: 0.6758
Epoch: 139/200... Training loss: 0.6792
Epoch: 140/200... Training loss: 0.6130
Epoch: 141/200... Training loss: 0.6270
Epoch: 142/200... Training loss: 0.6774
Epoch: 143/200... Training loss: 0.6288
Epoch: 144/200... Training loss: 0.6354
Epoch: 145/200... Training loss: 0.6057
Epoch: 146/200... Training loss: 0.6747
Epoch: 147/200... Training loss: 0.6353
Epoch: 148/200... Training loss: 0.6222
Epoch: 149/200... Training loss: 0.6219
Epoch: 150/200... Training loss: 0.6254
Epoch: 151/200... Training loss: 0.6597
Epoch: 152/200... Training loss: 0.6051
Epoch: 153/200... Training loss: 0.5965
Epoch: 154/200... Training loss: 0.6426
Epoch: 155/200... Training loss: 0.6715
Epoch: 156/200... Training loss: 0.6024
Epoch: 157/200... Training loss: 0.6116
Epoch: 158/200... Training loss: 0.6764
Epoch: 159/200... Training loss: 0.6710
Epoch: 160/200... Training loss: 0.6767
Epoch: 162/200... Training loss: 0.6801
Epoch: 163/200... Training loss: 0.6761
Epoch: 164/200... Training loss: 0.6783
Epoch: 165/200... Training loss: 0.6729
Epoch: 166/200... Training loss: 0.6328
Epoch: 167/200... Training loss: 0.6184
Epoch: 168/200... Training loss: 0.6260
Epoch: 169/200... Training loss: 0.6396
Epoch: 170/200... Training loss: 0.6195
Epoch: 171/200... Training loss: 0.6761
Epoch: 172/200... Training loss: 0.6812
Epoch: 173/200... Training loss: 0.5996
Epoch: 174/200... Training loss: 0.6738
Epoch: 175/200... Training loss: 0.6450
Epoch: 176/200... Training loss: 0.6229
Epoch: 177/200... Training loss: 0.6328
Epoch: 178/200... Training loss: 0.6757
Epoch: 179/200... Training loss: 0.6790
Epoch: 180/200... Training loss: 0.6127
Epoch: 181/200... Training loss: 0.6269
Epoch: 182/200... Training loss: 0.6774
Epoch: 183/200... Training loss: 0.6287
Epoch: 184/200... Training loss: 0.6353
Epoch: 185/200... Training loss: 0.6055
Epoch: 186/200... Training loss: 0.6746
Epoch: 187/200... Training loss: 0.6351
Epoch: 188/200... Training loss: 0.6222
Epoch: 189/200... Training loss: 0.6219
Epoch: 190/200... Training loss: 0.6253
Epoch: 191/200... Training loss: 0.6596
Epoch: 192/200... Training loss: 0.6051
Epoch: 193/200... Training loss: 0.5957
Epoch: 194/200... Training loss: 0.6421
Epoch: 195/200... Training loss: 0.6710
Epoch: 196/200... Training loss: 0.6020
Epoch: 197/200... Training loss: 0.6109
Epoch: 198/200... Training loss: 0.6760
Epoch: 199/200... Training loss: 0.6706
Epoch: 200/200... Training loss: 0.6762
In [30]:
hide_code
# Display original grayscaled images
fig, ax = plt.subplots(figsize=(18, 3), nrows=1, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(5):
    image = gray_tensors[i*2000].reshape(32,32)
    ax[i].imshow(image, cmap=cm.bone)

ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.gcf()
ax[2].set_title('Examples of grayscaled images', fontsize=25);
In [31]:
hide_code
# Display output grayscaled images
fig, ax = plt.subplots(figsize=(18, 3), nrows=1, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(5):
    image = gray_tensors1[i*2000].reshape(32,32)
    ax[i].imshow(image, cmap=cm.bone)

ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.gcf()
ax[2].set_title('Examples of cleaned images. TensorFlow', fontsize=25);

Keras; Compressed Images

In [29]:
hide_code
def autoencoder():
    inputs = Input(shape=(32, 32, 1))
    
    # Encode
    x = Conv2D(32, 5, activation='relu', padding='same')(inputs)
    x = MaxPooling2D(padding='same')(x)
    
    x = Conv2D(16, 5, activation='relu', padding='same')(x)
    x = MaxPooling2D(padding='same')(x)
    
    x = Conv2D(8, 5, activation='relu', padding='same')(x)
    encoded = MaxPooling2D(padding='same')(x)    
    
    # Decode
    x = Conv2D(8, 5, activation='relu', padding='same')(encoded)
    x = UpSampling2D()(x)
    
    x = Conv2D(16, 5, activation='relu', padding='same')(x)
    x = UpSampling2D()(x)
    
    x = Conv2D(32, 5, activation='relu', padding='same')(x) 
    x = UpSampling2D()(x)
    
    decoded = Conv2D(1, 3, activation='sigmoid', padding='same')(x)
    
    # Autoencoder
    autoencoder = Model(inputs, decoded)
    
    # Compile
    autoencoder.compile(optimizer='nadam', loss='binary_crossentropy')
    
    return autoencoder

autoencoder = autoencoder()
In [30]:
hide_code
# Display autoencoder architecture 
autoencoder.summary()
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_3 (InputLayer)         (None, 32, 32, 1)         0         
_________________________________________________________________
conv2d_15 (Conv2D)           (None, 32, 32, 32)        832       
_________________________________________________________________
max_pooling2d_7 (MaxPooling2 (None, 16, 16, 32)        0         
_________________________________________________________________
conv2d_16 (Conv2D)           (None, 16, 16, 16)        12816     
_________________________________________________________________
max_pooling2d_8 (MaxPooling2 (None, 8, 8, 16)          0         
_________________________________________________________________
conv2d_17 (Conv2D)           (None, 8, 8, 8)           3208      
_________________________________________________________________
max_pooling2d_9 (MaxPooling2 (None, 4, 4, 8)           0         
_________________________________________________________________
conv2d_18 (Conv2D)           (None, 4, 4, 8)           1608      
_________________________________________________________________
up_sampling2d_7 (UpSampling2 (None, 8, 8, 8)           0         
_________________________________________________________________
conv2d_19 (Conv2D)           (None, 8, 8, 16)          3216      
_________________________________________________________________
up_sampling2d_8 (UpSampling2 (None, 16, 16, 16)        0         
_________________________________________________________________
conv2d_20 (Conv2D)           (None, 16, 16, 32)        12832     
_________________________________________________________________
up_sampling2d_9 (UpSampling2 (None, 32, 32, 32)        0         
_________________________________________________________________
conv2d_21 (Conv2D)           (None, 32, 32, 1)         289       
=================================================================
Total params: 34,801
Trainable params: 34,801
Non-trainable params: 0
_________________________________________________________________
In [31]:
hide_code
# Train autoencoder
autoencoder_history = autoencoder.fit(x_train, x_train, 
                                      epochs=200, batch_size=64, verbose=2,
                                      validation_data=(x_valid, x_valid))
Train on 10032 samples, validate on 1254 samples
Epoch 1/200
65s - loss: 0.6665 - val_loss: 0.6510
Epoch 2/200
68s - loss: 0.6518 - val_loss: 0.6567
Epoch 3/200
60s - loss: 0.6476 - val_loss: 0.6513
Epoch 4/200
62s - loss: 0.6475 - val_loss: 0.6489
Epoch 5/200
69s - loss: 0.6467 - val_loss: 0.6476
Epoch 6/200
77s - loss: 0.6466 - val_loss: 0.6470
Epoch 7/200
71s - loss: 0.6457 - val_loss: 0.6490
Epoch 8/200
89s - loss: 0.6456 - val_loss: 0.6455
Epoch 9/200
101s - loss: 0.6454 - val_loss: 0.6489
Epoch 10/200
101s - loss: 0.6450 - val_loss: 0.6457
Epoch 11/200
74s - loss: 0.6449 - val_loss: 0.6459
Epoch 12/200
72s - loss: 0.6447 - val_loss: 0.6462
Epoch 13/200
80s - loss: 0.6447 - val_loss: 0.6451
Epoch 14/200
83s - loss: 0.6444 - val_loss: 0.6468
Epoch 15/200
80s - loss: 0.6443 - val_loss: 0.6453
Epoch 16/200
84s - loss: 0.6444 - val_loss: 0.6446
Epoch 17/200
77s - loss: 0.6441 - val_loss: 0.6450
Epoch 18/200
75s - loss: 0.6442 - val_loss: 0.6447
Epoch 19/200
79s - loss: 0.6440 - val_loss: 0.6454
Epoch 20/200
76s - loss: 0.6440 - val_loss: 0.6452
Epoch 21/200
66s - loss: 0.6438 - val_loss: 0.6460
Epoch 22/200
64s - loss: 0.6439 - val_loss: 0.6448
Epoch 23/200
64s - loss: 0.6438 - val_loss: 0.6444
Epoch 24/200
64s - loss: 0.6436 - val_loss: 0.6450
Epoch 25/200
64s - loss: 0.6437 - val_loss: 0.6450
Epoch 26/200
64s - loss: 0.6437 - val_loss: 0.6447
Epoch 27/200
64s - loss: 0.6434 - val_loss: 0.6443
Epoch 28/200
66s - loss: 0.6435 - val_loss: 0.6450
Epoch 29/200
64s - loss: 0.6435 - val_loss: 0.6445
Epoch 30/200
64s - loss: 0.6435 - val_loss: 0.6446
Epoch 31/200
64s - loss: 0.6434 - val_loss: 0.6445
Epoch 32/200
63s - loss: 0.6434 - val_loss: 0.6454
Epoch 33/200
64s - loss: 0.6432 - val_loss: 0.6445
Epoch 34/200
64s - loss: 0.6433 - val_loss: 0.6444
Epoch 35/200
63s - loss: 0.6432 - val_loss: 0.6441
Epoch 36/200
63s - loss: 0.6432 - val_loss: 0.6441
Epoch 37/200
63s - loss: 0.6431 - val_loss: 0.6444
Epoch 38/200
63s - loss: 0.6432 - val_loss: 0.6440
Epoch 39/200
64s - loss: 0.6430 - val_loss: 0.6446
Epoch 40/200
64s - loss: 0.6429 - val_loss: 0.6459
Epoch 41/200
63s - loss: 0.6430 - val_loss: 0.6438
Epoch 42/200
63s - loss: 0.6429 - val_loss: 0.6438
Epoch 43/200
64s - loss: 0.6429 - val_loss: 0.6443
Epoch 44/200
64s - loss: 0.6428 - val_loss: 0.6438
Epoch 45/200
63s - loss: 0.6428 - val_loss: 0.6440
Epoch 46/200
63s - loss: 0.6428 - val_loss: 0.6437
Epoch 47/200
63s - loss: 0.6428 - val_loss: 0.6442
Epoch 48/200
64s - loss: 0.6427 - val_loss: 0.6437
Epoch 49/200
64s - loss: 0.6427 - val_loss: 0.6436
Epoch 50/200
64s - loss: 0.6426 - val_loss: 0.6439
Epoch 51/200
63s - loss: 0.6426 - val_loss: 0.6436
Epoch 52/200
63s - loss: 0.6426 - val_loss: 0.6436
Epoch 53/200
64s - loss: 0.6425 - val_loss: 0.6437
Epoch 54/200
64s - loss: 0.6425 - val_loss: 0.6441
Epoch 55/200
63s - loss: 0.6425 - val_loss: 0.6439
Epoch 56/200
64s - loss: 0.6424 - val_loss: 0.6440
Epoch 57/200
63s - loss: 0.6424 - val_loss: 0.6436
Epoch 58/200
64s - loss: 0.6424 - val_loss: 0.6434
Epoch 59/200
64s - loss: 0.6424 - val_loss: 0.6436
Epoch 60/200
63s - loss: 0.6423 - val_loss: 0.6434
Epoch 61/200
64s - loss: 0.6425 - val_loss: 0.6449
Epoch 62/200
66s - loss: 0.6423 - val_loss: 0.6437
Epoch 63/200
66s - loss: 0.6422 - val_loss: 0.6434
Epoch 64/200
65s - loss: 0.6422 - val_loss: 0.6434
Epoch 65/200
64s - loss: 0.6423 - val_loss: 0.6437
Epoch 66/200
63s - loss: 0.6422 - val_loss: 0.6435
Epoch 67/200
63s - loss: 0.6422 - val_loss: 0.6433
Epoch 68/200
64s - loss: 0.6421 - val_loss: 0.6434
Epoch 69/200
63s - loss: 0.6422 - val_loss: 0.6432
Epoch 70/200
63s - loss: 0.6421 - val_loss: 0.6437
Epoch 71/200
64s - loss: 0.6422 - val_loss: 0.6432
Epoch 72/200
64s - loss: 0.6420 - val_loss: 0.6432
Epoch 73/200
64s - loss: 0.6420 - val_loss: 0.6433
Epoch 74/200
64s - loss: 0.6421 - val_loss: 0.6432
Epoch 75/200
64s - loss: 0.6420 - val_loss: 0.6432
Epoch 76/200
64s - loss: 0.6421 - val_loss: 0.6432
Epoch 77/200
64s - loss: 0.6420 - val_loss: 0.6434
Epoch 78/200
63s - loss: 0.6420 - val_loss: 0.6431
Epoch 79/200
63s - loss: 0.6420 - val_loss: 0.6432
Epoch 80/200
63s - loss: 0.6420 - val_loss: 0.6436
Epoch 81/200
63s - loss: 0.6420 - val_loss: 0.6432
Epoch 82/200
63s - loss: 0.6420 - val_loss: 0.6433
Epoch 83/200
63s - loss: 0.6420 - val_loss: 0.6435
Epoch 84/200
63s - loss: 0.6420 - val_loss: 0.6430
Epoch 85/200
64s - loss: 0.6419 - val_loss: 0.6430
Epoch 86/200
64s - loss: 0.6419 - val_loss: 0.6435
Epoch 87/200
63s - loss: 0.6419 - val_loss: 0.6431
Epoch 88/200
63s - loss: 0.6419 - val_loss: 0.6433
Epoch 89/200
63s - loss: 0.6419 - val_loss: 0.6430
Epoch 90/200
64s - loss: 0.6418 - val_loss: 0.6431
Epoch 91/200
64s - loss: 0.6419 - val_loss: 0.6431
Epoch 92/200
64s - loss: 0.6418 - val_loss: 0.6435
Epoch 93/200
64s - loss: 0.6419 - val_loss: 0.6432
Epoch 94/200
63s - loss: 0.6418 - val_loss: 0.6431
Epoch 95/200
64s - loss: 0.6418 - val_loss: 0.6431
Epoch 96/200
63s - loss: 0.6418 - val_loss: 0.6430
Epoch 97/200
63s - loss: 0.6418 - val_loss: 0.6430
Epoch 98/200
63s - loss: 0.6418 - val_loss: 0.6441
Epoch 99/200
63s - loss: 0.6418 - val_loss: 0.6429
Epoch 100/200
63s - loss: 0.6417 - val_loss: 0.6429
Epoch 101/200
63s - loss: 0.6418 - val_loss: 0.6429
Epoch 102/200
64s - loss: 0.6417 - val_loss: 0.6432
Epoch 103/200
63s - loss: 0.6417 - val_loss: 0.6430
Epoch 104/200
64s - loss: 0.6418 - val_loss: 0.6429
Epoch 105/200
64s - loss: 0.6417 - val_loss: 0.6430
Epoch 106/200
63s - loss: 0.6417 - val_loss: 0.6441
Epoch 107/200
63s - loss: 0.6417 - val_loss: 0.6430
Epoch 108/200
63s - loss: 0.6417 - val_loss: 0.6428
Epoch 109/200
63s - loss: 0.6417 - val_loss: 0.6428
Epoch 110/200
64s - loss: 0.6417 - val_loss: 0.6430
Epoch 111/200
64s - loss: 0.6417 - val_loss: 0.6430
Epoch 112/200
63s - loss: 0.6417 - val_loss: 0.6428
Epoch 113/200
63s - loss: 0.6417 - val_loss: 0.6430
Epoch 114/200
64s - loss: 0.6417 - val_loss: 0.6430
Epoch 115/200
63s - loss: 0.6417 - val_loss: 0.6434
Epoch 116/200
64s - loss: 0.6416 - val_loss: 0.6430
Epoch 117/200
64s - loss: 0.6417 - val_loss: 0.6430
Epoch 118/200
64s - loss: 0.6416 - val_loss: 0.6430
Epoch 119/200
63s - loss: 0.6417 - val_loss: 0.6429
Epoch 120/200
64s - loss: 0.6416 - val_loss: 0.6433
Epoch 121/200
65s - loss: 0.6416 - val_loss: 0.6432
Epoch 122/200
65s - loss: 0.6417 - val_loss: 0.6429
Epoch 123/200
64s - loss: 0.6416 - val_loss: 0.6429
Epoch 124/200
61s - loss: 0.6417 - val_loss: 0.6428
Epoch 125/200
60s - loss: 0.6416 - val_loss: 0.6429
Epoch 126/200
62s - loss: 0.6416 - val_loss: 0.6429
Epoch 127/200
60s - loss: 0.6416 - val_loss: 0.6428
Epoch 128/200
60s - loss: 0.6416 - val_loss: 0.6428
Epoch 129/200
61s - loss: 0.6416 - val_loss: 0.6430
Epoch 130/200
60s - loss: 0.6416 - val_loss: 0.6429
Epoch 131/200
61s - loss: 0.6417 - val_loss: 0.6433
Epoch 132/200
62s - loss: 0.6416 - val_loss: 0.6428
Epoch 133/200
62s - loss: 0.6416 - val_loss: 0.6428
Epoch 134/200
62s - loss: 0.6416 - val_loss: 0.6429
Epoch 135/200
62s - loss: 0.6416 - val_loss: 0.6428
Epoch 136/200
62s - loss: 0.6416 - val_loss: 0.6428
Epoch 137/200
62s - loss: 0.6416 - val_loss: 0.6431
Epoch 138/200
63s - loss: 0.6416 - val_loss: 0.6428
Epoch 139/200
63s - loss: 0.6416 - val_loss: 0.6429
Epoch 140/200
62s - loss: 0.6416 - val_loss: 0.6428
Epoch 141/200
62s - loss: 0.6416 - val_loss: 0.6429
Epoch 142/200
62s - loss: 0.6415 - val_loss: 0.6428
Epoch 143/200
62s - loss: 0.6416 - val_loss: 0.6430
Epoch 144/200
62s - loss: 0.6415 - val_loss: 0.6427
Epoch 145/200
62s - loss: 0.6416 - val_loss: 0.6429
Epoch 146/200
64s - loss: 0.6415 - val_loss: 0.6429
Epoch 147/200
63s - loss: 0.6415 - val_loss: 0.6429
Epoch 148/200
62s - loss: 0.6415 - val_loss: 0.6430
Epoch 149/200
62s - loss: 0.6416 - val_loss: 0.6433
Epoch 150/200
62s - loss: 0.6415 - val_loss: 0.6428
Epoch 151/200
63s - loss: 0.6416 - val_loss: 0.6428
Epoch 152/200
63s - loss: 0.6415 - val_loss: 0.6429
Epoch 153/200
62s - loss: 0.6415 - val_loss: 0.6427
Epoch 154/200
62s - loss: 0.6415 - val_loss: 0.6429
Epoch 155/200
62s - loss: 0.6415 - val_loss: 0.6430
Epoch 156/200
62s - loss: 0.6415 - val_loss: 0.6430
Epoch 157/200
63s - loss: 0.6415 - val_loss: 0.6429
Epoch 158/200
62s - loss: 0.6415 - val_loss: 0.6428
Epoch 159/200
62s - loss: 0.6415 - val_loss: 0.6428
Epoch 160/200
63s - loss: 0.6415 - val_loss: 0.6427
Epoch 161/200
62s - loss: 0.6415 - val_loss: 0.6427
Epoch 162/200
62s - loss: 0.6415 - val_loss: 0.6429
Epoch 163/200
62s - loss: 0.6415 - val_loss: 0.6428
Epoch 164/200
62s - loss: 0.6415 - val_loss: 0.6427
Epoch 165/200
62s - loss: 0.6415 - val_loss: 0.6430
Epoch 166/200
64s - loss: 0.6415 - val_loss: 0.6427
Epoch 167/200
63s - loss: 0.6415 - val_loss: 0.6427
Epoch 168/200
63s - loss: 0.6415 - val_loss: 0.6427
Epoch 169/200
63s - loss: 0.6414 - val_loss: 0.6427
Epoch 170/200
63s - loss: 0.6415 - val_loss: 0.6428
Epoch 171/200
63s - loss: 0.6415 - val_loss: 0.6426
Epoch 172/200
64s - loss: 0.6415 - val_loss: 0.6427
Epoch 173/200
65s - loss: 0.6414 - val_loss: 0.6426
Epoch 174/200
63s - loss: 0.6414 - val_loss: 0.6427
Epoch 175/200
63s - loss: 0.6415 - val_loss: 0.6432
Epoch 176/200
64s - loss: 0.6415 - val_loss: 0.6428
Epoch 177/200
65s - loss: 0.6414 - val_loss: 0.6426
Epoch 178/200
64s - loss: 0.6414 - val_loss: 0.6427
Epoch 179/200
62s - loss: 0.6414 - val_loss: 0.6429
Epoch 180/200
63s - loss: 0.6414 - val_loss: 0.6427
Epoch 181/200
63s - loss: 0.6414 - val_loss: 0.6427
Epoch 182/200
63s - loss: 0.6414 - val_loss: 0.6428
Epoch 183/200
63s - loss: 0.6414 - val_loss: 0.6427
Epoch 184/200
92s - loss: 0.6414 - val_loss: 0.6427
Epoch 185/200
79s - loss: 0.6414 - val_loss: 0.6428
Epoch 186/200
73s - loss: 0.6414 - val_loss: 0.6435
Epoch 187/200
68s - loss: 0.6414 - val_loss: 0.6426
Epoch 188/200
74s - loss: 0.6414 - val_loss: 0.6426
Epoch 189/200
72s - loss: 0.6414 - val_loss: 0.6429
Epoch 190/200
68s - loss: 0.6414 - val_loss: 0.6426
Epoch 191/200
77s - loss: 0.6414 - val_loss: 0.6428
Epoch 192/200
69s - loss: 0.6414 - val_loss: 0.6427
Epoch 193/200
80s - loss: 0.6414 - val_loss: 0.6426
Epoch 194/200
81s - loss: 0.6414 - val_loss: 0.6427
Epoch 195/200
80s - loss: 0.6414 - val_loss: 0.6426
Epoch 196/200
77s - loss: 0.6413 - val_loss: 0.6426
Epoch 197/200
82s - loss: 0.6414 - val_loss: 0.6428
Epoch 198/200
78s - loss: 0.6414 - val_loss: 0.6428
Epoch 199/200
65s - loss: 0.6414 - val_loss: 0.6428
Epoch 200/200
70s - loss: 0.6414 - val_loss: 0.6426
In [32]:
hide_code
# Plot training history
plt.figure(figsize=(18, 6))
plt.plot(autoencoder_history.history['loss'][2:], color='slategray', label = 'train')
plt.plot(autoencoder_history.history['val_loss'][2:], color='#4876ff', label = 'valid')
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.title('Loss Function'); 
In [33]:
hide_code
# Create decoded images
x_test_decoded = autoencoder.predict(x_test)
In [34]:
hide_code
fig, ax = plt.subplots(figsize=(18, 3), nrows=1, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(5):
    image = x_test[i*150].reshape(32,32)
    ax[i].imshow(image, cmap=cm.bone)

ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.gcf()
ax[2].set_title('Examples of grayscaled letters', fontsize=25);
In [35]:
hide_code
fig, ax = plt.subplots(figsize=(18, 3), nrows=1, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(5):
    image = x_test_decoded[i*150].reshape(32,32)
    ax[i].imshow(image, cmap=cm.bone)

ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.gcf()
ax[2].set_title('Examples of cleaned letters. Keras', fontsize=25);