import numpy as np
import matplotlib.pyplot as plt
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import Input, Conv2D, Conv2DTranspose, Add
from tensorflow.python.keras.preprocessing.image import img_to_array, array_to_img, ImageDataGenerator
def drop_resolution(x, scale=3.0):
    """Degrade an image array by downscaling and re-upscaling it."""
    # Note: x.shape gives (height, width) while PIL's resize expects
    # (width, height); harmless here because target_size is square.
    size = (x.shape[0], x.shape[1])
    small_size = (int(size[0] / scale), int(size[1] / scale))
    img = array_to_img(x)
    small_img = img.resize(small_size, 3)  # 3 == PIL.Image.BICUBIC
    return img_to_array(small_img.resize(img.size, 3))
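# Quick sanity check for drop_resolution (illustrative; the random array and
# the 64x64 size are arbitrary choices, not part of the original gist).
demo = np.random.rand(64, 64, 3) * 255.0
degraded = drop_resolution(demo, scale=2.0)
assert degraded.shape == demo.shape  # same size, detail lost to resampling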
def data_generator(data_dir, mode, scale=2.0, target_size=(200, 200), batch_size=32, shuffle=True):
    """Yield (low_res, high_res) image batches, both scaled to [0, 1]."""
    for imgs in ImageDataGenerator().flow_from_directory(
            directory=data_dir,
            classes=[mode],
            class_mode=None,
            color_mode='rgb',
            target_size=target_size,
            batch_size=batch_size,
            shuffle=shuffle):
        x = np.array([drop_resolution(img, scale) for img in imgs])
        yield x / 255., imgs / 255.
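# Assumed directory layout (inferred from flow_from_directory's contract;
# not stated explicitly in the original gist):
#   DATA_DIR/
#       train/   <- N_TRAIN_DATA high-resolution training images
#       test/    <- N_TEST_DATA high-resolution test images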
DATA_DIR = ""
N_TRAIN_DATA = 1000
N_TEST_DATA = 100
BATCH_SIZE = 32
train_data_generator = data_generator(DATA_DIR, 'train', batch_size=BATCH_SIZE)
x_test, y_test = next(data_generator(DATA_DIR, 'test', batch_size=N_TEST_DATA, shuffle=False))
# SRCNN-style super-resolution network: feature extraction (9x9),
# non-linear mapping (1x1), reconstruction (5x5).
inputs = Input(shape=(None, None, 3))
x = Conv2D(filters=64, kernel_size=(9, 9), strides=(1, 1), activation='relu', padding='same')(inputs)
x = Conv2D(filters=32, kernel_size=(1, 1), strides=(1, 1), activation='relu', padding='same')(x)
outputs = Conv2D(filters=3, kernel_size=(5, 5), strides=(1, 1), padding='same')(x)
model = Model(inputs, outputs)
model.compile(optimizer='adam', loss='mse')  # recompiled below with the PSNR metric
model.summary()
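# Because the network is fully convolutional, it accepts any spatial size.
# A quick shape check on dummy data (sizes chosen arbitrarily):
for size in (64, 200):
    dummy = np.zeros((1, size, size, 3), dtype='float32')
    print(model.predict(dummy).shape)  # -> (1, size, size, 3)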
# Alternative: encoder-decoder with additive skip connections
# (uncomment to use in place of the SRCNN-style model above)
# inputs = Input(shape=(None, None, 3), dtype='float')
# # Encoder
# conv1 = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding='same')(inputs)
# conv1 = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding='same')(conv1)
# conv2 = Conv2D(filters=64, kernel_size=(3, 3), strides=(2, 2), padding='same')(conv1)
# conv2 = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding='same')(conv2)
# conv3 = Conv2D(filters=64, kernel_size=(3, 3), strides=(2, 2), padding='same')(conv2)
# conv3 = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding='same')(conv3)
# # Decoder
# deconv3 = Conv2DTranspose(filters=64, kernel_size=(3, 3), strides=(1, 1), padding='same')(conv3)
# deconv3 = Conv2DTranspose(filters=64, kernel_size=(3, 3), strides=(2, 2), padding='same')(deconv3)
# merge2 = Add()([deconv3, conv2])
# deconv2 = Conv2DTranspose(filters=64, kernel_size=(3, 3), strides=(1, 1), padding='same')(merge2)
# deconv2 = Conv2DTranspose(filters=64, kernel_size=(3, 3), strides=(2, 2), padding='same')(deconv2)
# merge1 = Add()([deconv2, conv1])
# deconv1 = Conv2DTranspose(filters=64, kernel_size=(3, 3), strides=(1, 1), padding='same')(merge1)
# deconv1 = Conv2DTranspose(filters=3, kernel_size=(3, 3), strides=(1, 1), padding='same')(deconv1)
# outputs = Add()([deconv1, inputs])
# model = Model(inputs, outputs)
# model.compile(optimizer='adam', loss='mse')
# print(model.summary())
def psnr(y_true, y_pred):
    """Peak signal-to-noise ratio in dB, assuming pixel values in [0, 1]."""
    return -10 * K.log(
        K.mean(K.flatten((y_true - y_pred) ** 2))
    ) / np.log(10)
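# Numeric check of the metric (illustrative): a constant error of 0.1 gives
# MSE = 0.01, and -10 * log10(0.01) = 20 dB.
a = K.constant(np.zeros((1, 4, 4, 3)))
b = K.constant(np.full((1, 4, 4, 3), 0.1))
print(K.eval(psnr(a, b)))  # -> ~20.0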
# Recompile with the PSNR metric, then train.
model.compile(loss='mean_squared_error', optimizer='adam', metrics=[psnr])
model.fit_generator(
    train_data_generator,
    validation_data=(x_test, y_test),
    steps_per_epoch=N_TRAIN_DATA // BATCH_SIZE,
    epochs=50
)
pred = model.predict(x_test)
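# The original gist stops after predict; a minimal visualization sketch
# (my addition) comparing input, prediction, and ground truth for one image:
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
for ax, img, title in zip(axes, [x_test[0], pred[0], y_test[0]],
                          ['low resolution', 'prediction', 'ground truth']):
    ax.imshow(np.clip(img, 0.0, 1.0))  # clip: predictions may leave [0, 1]
    ax.set_title(title)
    ax.axis('off')
plt.show()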