import numpy as np
import keras
import keras.layers as L

# img_shape, X_train and X_test come from the assignment notebook and are not shown in this gist.

def build_deep_autoencoder(img_shape, code_size=32):
    """PCA's deeper brother. See instructions above"""
    H, W, C = img_shape

    # Encoder: flatten the image and squeeze it down to `code_size` numbers.
    encoder = keras.models.Sequential()
    encoder.add(L.InputLayer(img_shape))
    encoder.add(L.Flatten())
    encoder.add(L.Dense(code_size * 8, activation='relu'))
    encoder.add(L.Dense(code_size * 4, activation='tanh'))
    encoder.add(L.Dense(code_size * 2, activation='tanh'))
    encoder.add(L.Dense(code_size))

    # Decoder: mirror of the encoder, expanding the code back into an image.
    decoder = keras.models.Sequential()
    decoder.add(L.InputLayer((code_size,)))
    decoder.add(L.Dense(code_size * 2, activation='tanh'))
    decoder.add(L.Dense(code_size * 4, activation='tanh'))
    decoder.add(L.Dense(code_size * 8, activation='relu'))
    decoder.add(L.Dense(np.prod(img_shape)))
    decoder.add(L.Reshape(img_shape))

    return encoder, decoder

encoder, decoder = build_deep_autoencoder(img_shape, code_size=32)

# Sanity check
get_dim = lambda layer: np.prod(layer.output_shape[1:])

for code_size in [1, 8, 32, 128, 512, 1024]:
    encoder, decoder = build_deep_autoencoder(img_shape, code_size=code_size)
    print("Testing code size %i" % code_size)
    assert encoder.output_shape[1:] == (code_size,), "encoder must output a code of required size"
    assert decoder.output_shape[1:] == img_shape, "decoder must output an image of valid shape"
    assert len(encoder.trainable_weights) >= 6, "encoder must contain at least 3 dense layers"
    assert len(decoder.trainable_weights) >= 6, "decoder must contain at least 3 dense layers"

    for layer in encoder.layers + decoder.layers:
        assert get_dim(layer) >= code_size, "Encoder layer %s is smaller than bottleneck (%i units)" % (layer.name, get_dim(layer))

print("All tests passed!")

# Tie encoder and decoder together into a single trainable model.
inp = L.Input(img_shape)
code = encoder(inp)
reconstruction = decoder(code)

deep_autoencoder = keras.models.Model(inp, reconstruction)
deep_autoencoder.compile('adamax', 'mse')

deep_autoencoder.fit(x=X_train, y=X_train, epochs=120,
                     validation_data=(X_test, X_test))
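After training, the easiest sanity check is to look at a few reconstructions next to the originals. The helper below is not part of the gist; it is a minimal sketch that assumes matplotlib is available and that X_test holds float images scaled to roughly [0, 1].

import matplotlib.pyplot as plt

def show_reconstructions(encoder, decoder, images, n=5):
    # Encode a handful of test images and decode them back.
    codes = encoder.predict(images[:n])
    recon = decoder.predict(codes)
    fig, axes = plt.subplots(2, n, figsize=(2 * n, 4))
    for i in range(n):
        axes[0, i].imshow(images[i].clip(0, 1))
        axes[0, i].set_title("original")
        axes[0, i].axis("off")
        axes[1, i].imshow(recon[i].clip(0, 1))
        axes[1, i].set_title("reconstruction")
        axes[1, i].axis("off")
    plt.show()

show_reconstructions(encoder, decoder, X_test)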
The purpose of this assignment was to put convolutional NNs to work; building the model from L.Dense layers alone yields a plain MLP, which is known to perform worse than a CNN on image data like this.
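For reference, a convolutional variant might look like the sketch below. This is not the gist author's code: the layer widths, kernel sizes, and the use of Conv2D / Conv2DTranspose are assumptions, and it expects H and W to be divisible by 4 (as with the assignment's small square images). It plugs into the same compile/fit code shown above.

def build_conv_autoencoder(img_shape, code_size=32):
    # Hypothetical convolutional counterpart of build_deep_autoencoder.
    H, W, C = img_shape

    # Encoder: two conv + pooling stages, then project to the code.
    encoder = keras.models.Sequential()
    encoder.add(L.InputLayer(img_shape))
    encoder.add(L.Conv2D(32, (3, 3), padding='same', activation='elu'))
    encoder.add(L.MaxPooling2D((2, 2)))
    encoder.add(L.Conv2D(64, (3, 3), padding='same', activation='elu'))
    encoder.add(L.MaxPooling2D((2, 2)))
    encoder.add(L.Flatten())
    encoder.add(L.Dense(code_size))

    # Decoder: expand the code back to a feature map, then upsample twice.
    decoder = keras.models.Sequential()
    decoder.add(L.InputLayer((code_size,)))
    decoder.add(L.Dense((H // 4) * (W // 4) * 64, activation='elu'))
    decoder.add(L.Reshape((H // 4, W // 4, 64)))
    decoder.add(L.Conv2DTranspose(32, (3, 3), strides=2, padding='same', activation='elu'))
    decoder.add(L.Conv2DTranspose(C, (3, 3), strides=2, padding='same'))

    return encoder, decoder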
Reconstruction MSE is too high for this architecture, even after 32 epochs of training.
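To put a number on that, the held-out reconstruction error can be read off directly; a minimal check, assuming the compiled deep_autoencoder and X_test from the gist above:

# Returns the compiled loss (MSE) averaged over the test set.
reconstruction_mse = deep_autoencoder.evaluate(X_test, X_test, verbose=0)
print("Test reconstruction MSE: %.6f" % reconstruction_mse)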