import glob
import random

import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import zoom
from sklearn.metrics.pairwise import cosine_similarity
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import (InputLayer, Conv2D, Conv2DTranspose,
                                     Flatten, Dense, Reshape)
def load_patches():
    """Load image patches from .npy files and split them into train/test sets."""
    x = []
    # Keep only every 10th patch to reduce the dataset size.
    imgs = sorted(glob.glob('/data/zuern/datasets/patches/*.npy'))
    imgs = imgs[::10]
    for img in imgs:
        print('loading', img)
        x.append(np.load(img))
    x = np.array(x)
    # Reshape to (N, 52, 52, 3) and normalize to [0, 1].
    x = x.reshape([-1, 52, 52, 3]) / 255.0
    x = x.astype(np.float32)
    print('PATCHES shape:', x.shape)
    print('PATCHES dtype:', x.dtype)
    print('PATCHES min, max:', x.min(), x.max())
    # 80/20 train/test split.
    split = int(0.8 * len(x))
    x_train = x[:split]
    x_test = x[split:]
    return x_train, x_test
def add_shadows(patches):
    """Darken random regions of each patch to simulate cast shadows."""
    noise_sizes = [4, 2, 13]  # low-res mask sizes; each must divide 52 evenly
    shadow_patches = []
    for patch in patches:
        noise_size = random.choice(noise_sizes)
        # Draw a random low-resolution mask and upsample it to 52x52.
        mask = np.random.uniform(size=(noise_size, noise_size))
        mask = zoom(mask, 52 // noise_size)
        # Pixels where the mask is at most 0.5 fall into shadow.
        idx = (mask <= 0.5)
        patch_shadow = patch.copy()
        patch_shadow[idx] = patch[idx] / 3  # darken shadowed pixels
        shadow_patches.append(patch_shadow)
    return np.asarray(shadow_patches)
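

# A quick visual sanity check for add_shadows (a hypothetical helper, not part
# of the original gist): plot a few clean patches next to their shadowed
# versions, using the matplotlib import above.
def preview_shadows(patches, n=4):
    shadowed = add_shadows(patches[:n])
    fig, axes = plt.subplots(2, n, figsize=(2 * n, 4))
    for i in range(n):
        axes[0, i].imshow(patches[i])
        axes[0, i].set_title('clean')
        axes[0, i].axis('off')
        axes[1, i].imshow(shadowed[i])
        axes[1, i].set_title('shadowed')
        axes[1, i].axis('off')
    plt.show()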
def get_model():
    """Convolutional autoencoder; the 'embedding' layer is the bottleneck code."""
    filters = [32, 64, 128, 32]
    input_shape = x_train.shape[1:]  # relies on the module-level x_train
    # With 52x52 inputs (52 % 8 != 0), the third conv needs 'valid' padding so
    # the transposed convs in the decoder can mirror the shapes exactly.
    if input_shape[0] % 8 == 0:
        pad3 = 'same'
    else:
        pad3 = 'valid'
    model = Sequential()
    model.add(InputLayer(input_shape))
    # Encoder: 52x52x3 -> 26x26x32 -> 13x13x64 -> 6x6x128
    model.add(Conv2D(filters[0], 5, strides=2, padding='same', activation='relu', name='conv1'))
    model.add(Conv2D(filters[1], 5, strides=2, padding='same', activation='relu', name='conv2'))
    model.add(Conv2D(filters[2], 3, strides=2, padding=pad3, activation='relu', name='conv3'))
    model.add(Flatten())
    # The output of this layer is the autoencoder embedding of the input.
    model.add(Dense(units=filters[3], name='embedding'))
    # Decoder: mirror the encoder back up to 52x52x3.
    model.add(Dense(units=filters[2] * int(input_shape[0] / 8) * int(input_shape[0] / 8), activation='relu'))
    model.add(Reshape((int(input_shape[0] / 8), int(input_shape[0] / 8), filters[2])))
    model.add(Conv2DTranspose(filters[1], 3, strides=2, padding=pad3, activation='relu', name='deconv3'))
    model.add(Conv2DTranspose(filters[0], 5, strides=2, padding='same', activation='relu', name='deconv2'))
    model.add(Conv2DTranspose(input_shape[2], 5, strides=2, padding='same', name='deconv1'))
    return model
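

# Optional sanity check (a sketch, not part of the original gist): the decoder
# must mirror the encoder exactly, so the reconstruction shape should equal the
# input shape. With 52 % 8 != 0 the spatial sizes go 52 -> 26 -> 13 -> 6 through
# the encoder and back up through the decoder. Assumes x_train is already set.
def check_shapes():
    model = get_model()
    assert model.output_shape[1:] == x_train.shape[1:], model.output_shape
    model.summary()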
def train_shadow_autoencoder():
    """Train to reconstruct clean patches from shadowed inputs (denoising setup)."""
    model = get_model()
    model.compile(optimizer='adam', loss='mse')
    model.fit(x_train_shadowy, x_train,
              epochs=10,
              batch_size=128,
              shuffle=True,
              validation_data=(x_test_shadowy, x_test))
    model.save_weights('shadow_autoencoder.h5')


def train_normal_autoencoder():
    """Train a plain autoencoder on clean patches as a baseline."""
    model = get_model()
    model.compile(optimizer='adam', loss='mse')
    model.fit(x_train, x_train,
              epochs=10,
              batch_size=128,
              shuffle=True,
              validation_data=(x_test, x_test))
    model.save_weights('normal_autoencoder.h5')
def test():
    """Compare how stable each encoder's embedding is under random shadows."""
    layer_name = 'embedding'

    # Rebuild both autoencoders and expose the bottleneck as the encoder output.
    shadow_model = get_model()
    shadow_model.load_weights('shadow_autoencoder.h5')
    shadow_encoder = Model(inputs=shadow_model.input,
                           outputs=shadow_model.get_layer(layer_name).output)

    normal_model = get_model()
    normal_model.load_weights('normal_autoencoder.h5')
    normal_encoder = Model(inputs=normal_model.input,
                           outputs=normal_model.get_layer(layer_name).output)

    query_patch_shadow = x_test_shadowy[10, :, :, :]
    query_patch_normal = x_test[10, :, :, :]
    query_patch_other = x_test[10, :, :, :]  # NOTE: reuses index 10, as in the original

    # Embeddings of the query patches (the *_shadow and *_other features are
    # computed but not used in the comparison loop below).
    features_shadow_shadow = shadow_encoder.predict(np.expand_dims(query_patch_shadow, axis=0))
    features_shadow_normal = shadow_encoder.predict(np.expand_dims(query_patch_normal, axis=0))
    features_shadow_other = shadow_encoder.predict(np.expand_dims(query_patch_other, axis=0))
    features_normal_shadow = normal_encoder.predict(np.expand_dims(query_patch_shadow, axis=0))
    features_normal_normal = normal_encoder.predict(np.expand_dims(query_patch_normal, axis=0))
    features_normal_other = normal_encoder.predict(np.expand_dims(query_patch_other, axis=0))

    # Re-shadow the same clean patch ten times and measure how much each
    # embedding moves under the perturbation.
    for _ in range(10):
        tmp = np.expand_dims(query_patch_normal, axis=0)
        tmp = add_shadows(tmp)
        f_shadow = shadow_encoder.predict(tmp)
        print('SHADOW MODEL: cosine similarity with random shadows:',
              cosine_similarity(features_shadow_normal, f_shadow))
        f_normal = normal_encoder.predict(tmp)
        print('NORMAL MODEL: cosine similarity with random shadows:',
              cosine_similarity(features_normal_normal, f_normal))
if __name__ == '__main__':
    x_train, x_test = load_patches()
    x_train_shadowy = add_shadows(x_train)
    x_test_shadowy = add_shadows(x_test)

    train_shadow_autoencoder()
    train_normal_autoencoder()
    test()
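
# Interpretation (added note, not output from the original gist): if the shadow
# autoencoder has learned a shadow-invariant embedding, its cosine similarities
# should stay closer to 1 across the ten random shadowings than those of the
# normal autoencoder, whose embedding sees shadowed inputs for the first time.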