# Gist by @prl900, created June 10, 2020 04:29
import logging, os
logging.disable(logging.WARNING)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras.layers import BatchNormalization, Conv2D, UpSampling2D, MaxPooling2D, Dropout
from tensorflow.keras.optimizers import SGD, Adam, Adagrad
import numpy as np
from tensorflow.keras import backend as K
from datetime import datetime
from datetime import timedelta
import pickle
import math
x_train = np.load("x_train.npy")
#xz_train = np.load("xz_train.npy")
y_train = (np.load("y_train.npy") > 5).astype(np.float32)
x_test = np.load("x_test.npy")
#xz_test = np.load("xz_test.npy")
y_test = (np.load("y_test.npy") > 5).astype(np.float32)
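# Added sanity check (not in the original gist): print the array shapes so any
# mismatch with the network's 512x512x2 input (and its 128x128 output) is
# visible before training starts.
print("x_train:", x_train.shape, "y_train:", y_train.shape)
print("x_test:", x_test.shape, "y_test:", y_test.shape)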
print("MSE train", np.mean(np.square(y_train)))
print("MSE test", np.mean(np.square(y_test)))
def weighted_bce(y_true, y_pred):
    # Binary cross-entropy with positive pixels weighted 11x to counter class imbalance.
    weights = (y_true * 10.) + 1.
    bce = K.binary_crossentropy(y_true, y_pred)
    return K.mean(bce * weights)
def dice_loss(y_true, y_pred):
    # Soft Dice loss with +1 smoothing to avoid division by zero on empty masks.
    numerator = 2 * tf.reduce_sum(y_true * y_pred, axis=-1)
    denominator = tf.reduce_sum(y_true + y_pred, axis=-1)
    return 1 - (numerator + 1) / (denominator + 1)
def tversky_loss(beta):
    # Tversky loss: beta weights false positives and (1 - beta) weights false negatives.
    def loss(y_true, y_pred):
        numerator = tf.reduce_sum(y_true * y_pred, axis=-1)
        denominator = y_true * y_pred + beta * (1 - y_true) * y_pred + (1 - beta) * y_true * (1 - y_pred)
        return 1 - (numerator + 1) / (tf.reduce_sum(denominator, axis=-1) + 1)
    return loss
def binary_focal_loss(y_true, y_pred):
    """
    Binary focal loss (Lin et al., 2017) with gamma=2 and alpha=0.75.
    :param y_true: A tensor of the same shape as `y_pred`
    :param y_pred: A tensor resulting from a sigmoid
    :return: Output tensor.
    """
    gamma = 2.
    alpha = .75
    pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
    pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
    epsilon = K.epsilon()
    # clip to prevent NaNs and Infs
    pt_1 = K.clip(pt_1, epsilon, 1. - epsilon)
    pt_0 = K.clip(pt_0, epsilon, 1. - epsilon)
    return -K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) \
        - K.sum((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))
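# Illustrative addition (not part of the original gist): a quick eager-mode
# sanity check that the custom losses above return finite values on a tiny
# toy batch before committing to a full training run. The toy tensors below
# are made up purely for this check.
_yt = tf.constant([[0., 1., 1., 0.]])
_yp = tf.constant([[0.1, 0.8, 0.6, 0.3]])
print("toy weighted_bce:", float(weighted_bce(_yt, _yp)))
print("toy dice_loss:", float(dice_loss(_yt, _yp)[0]))
print("toy tversky_loss(0.7):", float(tversky_loss(0.7)(_yt, _yp)[0]))
print("toy binary_focal_loss:", float(binary_focal_loss(_yt, _yp)))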
def get_unet():
    concat_axis = 3
    ref_input = layers.Input(shape=(512, 512, 2))
    #z_input = layers.Input(shape = (64, 64, 3))
    feats = 4  # 16

    # Encoder: five 3x3 conv + batch-norm blocks, each followed by 2x2 max pooling.
    bn0 = BatchNormalization(axis=3)(ref_input)
    conv1 = layers.Conv2D(feats, (3, 3), activation='relu', padding='same', name='conv1_1')(bn0)
    bn2 = BatchNormalization(axis=3)(conv1)
    pool1 = layers.MaxPooling2D(pool_size=(2, 2))(bn2)  # 256

    conv2 = layers.Conv2D(2*feats, (3, 3), activation='relu', padding='same')(pool1)
    bn4 = BatchNormalization(axis=3)(conv2)
    pool2 = layers.MaxPooling2D(pool_size=(2, 2))(bn4)  # 128

    conv3 = layers.Conv2D(4*feats, (3, 3), activation='relu', padding='same')(pool2)
    bn6 = BatchNormalization(axis=3)(conv3)
    pool3 = layers.MaxPooling2D(pool_size=(2, 2))(bn6)  # 64

    #zadd = layers.concatenate([z_input, pool3], axis=concat_axis)
    #nzadd = layers.BatchNormalization(axis=3)(zadd)
    conv4 = layers.Conv2D(8*feats, (3, 3), activation='relu', padding='same')(pool3)
    bn8 = BatchNormalization(axis=3)(conv4)
    pool4 = layers.MaxPooling2D(pool_size=(2, 2))(bn8)  # 32

    conv5 = layers.Conv2D(16*feats, (3, 3), activation='relu', padding='same')(pool4)
    bn10 = BatchNormalization(axis=3)(conv5)
    pool5 = layers.MaxPooling2D(pool_size=(2, 2))(bn10)  # 16

    # Bottleneck.
    conv6 = layers.Conv2D(32*feats, (3, 3), activation='relu', padding='same')(pool5)
    bn11 = BatchNormalization(axis=3)(conv6)

    # Decoder: three upsampling blocks with skip connections to the encoder,
    # so the output resolution is 128x128 (a quarter of the 512x512 input).
    up_conv6 = layers.UpSampling2D(size=(2, 2))(bn11)  # 32
    up7 = layers.concatenate([up_conv6, conv5], axis=concat_axis)
    conv7 = layers.Conv2D(16*feats, (3, 3), activation='relu', padding='same')(up7)
    bn13 = BatchNormalization(axis=3)(conv7)

    up_conv5 = layers.UpSampling2D(size=(2, 2))(bn13)  # 64
    up6 = layers.concatenate([up_conv5, conv4], axis=concat_axis)
    conv6 = layers.Conv2D(8*feats, (3, 3), activation='relu', padding='same')(up6)
    bn15 = BatchNormalization(axis=3)(conv6)

    up_conv6 = layers.UpSampling2D(size=(2, 2))(bn15)  # 128
    up7 = layers.concatenate([up_conv6, conv3], axis=concat_axis)
    conv7 = layers.Conv2D(4*feats, (3, 3), activation='relu', padding='same')(up7)
    bn13 = BatchNormalization(axis=3)(conv7)

    # Sigmoid on the final 1x1 convolution constrains the output to [0, 1] for binary classification.
    conv8 = layers.Conv2D(1, (1, 1), activation='sigmoid')(bn13)

    #model = models.Model(inputs=[ref_input,z_input], outputs=conv8)
    model = models.Model(inputs=ref_input, outputs=conv8)
    return model
model = get_unet()
model.summary()
opt = Adagrad(learning_rate=0.001, decay=1e-6)
model.compile(optimizer=opt, loss=weighted_bce,
              metrics=[tf.keras.metrics.Precision(name='p'), tf.keras.metrics.Recall(name='r')])
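# Note (added for clarity, not in the original gist): any of the other custom
# losses defined above could be swapped in here instead of weighted_bce, e.g.:
#   model.compile(optimizer=opt, loss=tversky_loss(0.7),
#                 metrics=[tf.keras.metrics.Precision(name='p'), tf.keras.metrics.Recall(name='r')])
# The value 0.7 for beta is only an illustrative choice.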
print(tf.config.experimental.list_physical_devices('GPU'))
#history = model.fit((x_train, xz_train), y_train, validation_data=((x_test, xz_test), y_test), epochs=50)
history = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=50)
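# Optional (not in the original gist): callbacks such as ModelCheckpoint or
# EarlyStopping could be passed to fit() to keep the best weights, e.g.:
#   ckpt = tf.keras.callbacks.ModelCheckpoint('best_model.h5', monitor='val_loss', save_best_only=True)
#   history = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=50, callbacks=[ckpt])
# 'best_model.h5' is a hypothetical filename.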
with open('history_3months_100epochs_4chan_class.pkl', 'wb') as f:
    pickle.dump(history.history, f)
model.save('model_3months_100epochs_4chan_class.h5')
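# Note (added for clarity, not in the original gist): because the model was
# compiled with a custom loss, reloading the saved .h5 file later requires
# passing that loss via custom_objects (or compile=False), e.g.:
#   reloaded = tf.keras.models.load_model('model_3months_100epochs_4chan_class.h5',
#                                         custom_objects={'weighted_bce': weighted_bce})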