classifier-autoencoder-mnist-3.3.1.py
'''Autoencoder with classifier

A shared encoder compresses MNIST digits into a 16-dim latent vector that
feeds two heads: a classifier trained with categorical crossentropy and a
decoder trained to reconstruct the input with MSE loss.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from keras.layers import Activation, Dense, Dropout, Input, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D, Flatten
from keras.layers import Reshape, Conv2DTranspose, UpSampling2D
from keras.models import Model
from keras.datasets import mnist
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.callbacks import TensorBoard
import matplotlib.pyplot as plt
# MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
num_labels = np.amax(y_train) + 1
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
image_size = x_train.shape[1]
x_train = np.reshape(x_train, [-1, image_size, image_size, 1])
x_test = np.reshape(x_test, [-1, image_size, image_size, 1])
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# Network parameters
input_shape = (image_size, image_size, 1)
batch_size = 128
kernel_size = 3
pool_size = 2
dropout = 0.4
filters = 16
latent_dim = 16
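# filters doubles in each encoder block below; latent_dim is the size of
# the bottleneck vector shared by the classifier and decoder heads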
# Build the autoencoder model
# First build the encoder model
inputs = Input(shape=input_shape, name='encoder_input')
x = inputs
# Stack of BN-ReLU-Conv2D-MaxPooling blocks
for i in range(2):
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    filters = filters * 2
    x = Conv2D(filters=filters,
               kernel_size=kernel_size,
               padding='same')(x)
    x = MaxPooling2D()(x)
# Shape info needed to build decoder model
shape = x.shape.as_list()
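# After two stride-2 poolings 28x28 -> 7x7, and filters has doubled twice
# from 16 to 64, so shape is [None, 7, 7, 64]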
# Generate a 16-dim latent vector
x = Flatten()(x)
latent = Dense(latent_dim, name='latent_vector')(x)
# Instantiate encoder model
encoder = Model(inputs, latent, name='encoder')
encoder.summary()
plot_model(encoder, to_file='classifier-encoder.png', show_shapes=True)
# Build the Decoder model
latent_inputs = Input(shape=(latent_dim,), name='decoder_input')
x = Dense(shape[1]*shape[2]*shape[3])(latent_inputs)
x = Reshape((shape[1], shape[2], shape[3]))(x)
# Stack of BN-ReLU-Transposed Conv2D-UpSampling blocks
for i in range(2):
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(filters=filters,
                        kernel_size=kernel_size,
                        padding='same')(x)
    x = UpSampling2D()(x)
    filters = int(filters / 2)
x = Conv2DTranspose(filters=1,
                    kernel_size=kernel_size,
                    padding='same')(x)
outputs = Activation('sigmoid', name='decoder_output')(x)
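# Sigmoid keeps the reconstructed pixels in [0, 1], matching the
# normalized inputs and the MSE reconstruction loss used below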
# Instantiate Decoder model
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()
plot_model(decoder, to_file='classifier-decoder.png', show_shapes=True)
# Classifier Model
latent_inputs = Input(shape=(latent_dim,), name='classifier_input')
x = Dense(512)(latent_inputs)
x = Activation('relu')(x)
x = Dropout(dropout)(x)
x = Dense(256)(x)
x = Activation('relu')(x)
x = Dropout(dropout)(x)
x = Dense(num_labels)(x)
classifier_outputs = Activation('softmax', name='classifier_output')(x)
classifier = Model(latent_inputs, classifier_outputs, name='classifier')
classifier.summary()
plot_model(classifier, to_file='classifier.png', show_shapes=True)
# Autoencoder = Encoder + Classifier/Decoder
# Instantiate autoencoder model
autoencoder = Model(inputs,
                    [classifier(encoder(inputs)), decoder(encoder(inputs))],
                    name='autoencoder')
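# The two outputs are ordered [classifier, decoder]; the loss list in
# compile() and the target list in fit() below follow the same order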
autoencoder.summary()
plot_model(autoencoder, to_file='classifier-autoencoder.png', show_shapes=True)
# Categorical crossentropy loss for the classifier output,
# Mean Square Error (MSE) loss for the decoder output, Adam optimizer
autoencoder.compile(loss=['categorical_crossentropy', 'mse'],
                    optimizer='adam',
                    metrics=['accuracy', 'mse'])
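# The two losses are summed with equal weight by default; passing
# loss_weights=[w_clf, w_dec] to compile() would rebalance them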
# Train the autoencoder for 2 epochs
autoencoder.fit(x_train, [y_train, x_train],
                validation_data=(x_test, [y_test, x_test]),
                epochs=2, batch_size=batch_size,
                callbacks=[TensorBoard(log_dir='/tmp/autoencoder')])
# Predict the Autoencoder output from test data
y_predicted, x_decoded = autoencoder.predict(x_test)
print(np.argmax(y_predicted[:8], axis=1))
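# Optional sanity check (not in the original script): overall classifier
# accuracy on the test set (y_test is one-hot encoded above)
accuracy = np.mean(np.argmax(y_predicted, axis=1) == np.argmax(y_test, axis=1))
print("Classifier test accuracy: %.4f" % accuracy)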
# Display the first 8 input and decoded images
imgs = np.concatenate([x_test[:8], x_decoded[:8]])
imgs = imgs.reshape((4, 4, image_size, image_size))
imgs = np.vstack([np.hstack(i) for i in imgs])
plt.figure()
plt.axis('off')
plt.title('Input: 1st 2 rows, Decoded: last 2 rows')
plt.imshow(imgs, interpolation='none', cmap='gray')
plt.savefig('input_and_decoded.png')
plt.show()
# Optional: inspect the spread of the latent vectors
# latent = encoder.predict(x_test)
# print("Variance:", np.var(latent))