# -*- coding: utf-8 -*-
import os
import sys
import shutil

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf

import keras
import keras.backend as K
from keras import activations, constraints, initializers, regularizers
from keras.models import Sequential, Model, load_model
from keras.layers import (Input, Dense, Reshape, Flatten, Permute, Multiply, Add, Dot,
                          Activation, BatchNormalization, Dropout, Lambda, Layer,
                          LeakyReLU, Embedding, TimeDistributed,
                          GlobalAveragePooling1D, GlobalMaxPooling1D)
from keras.layers.convolutional import Conv1D, Conv2D
from keras.layers.merge import Concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.optimizers import Adam, SGD, Nadam
from keras.backend.tensorflow_backend import set_session
from sklearn.utils import shuffle

# Project-specific imports (not used directly in this snippet).
from machine_gan import GAN
from machine_gan.schemes.IWGAN_TrainingScheme import IWGAN_TrainingScheme, wasserstein_loss

NEG_INF = -1e38
DATA_DIR = '/t/Datasets/GraphDatasets/'
class LayerNormalization(keras.layers.Layer):

    def __init__(self,
                 center=True,
                 scale=True,
                 epsilon=None,
                 gamma_initializer='ones',
                 beta_initializer='zeros',
                 gamma_regularizer=None,
                 beta_regularizer=None,
                 gamma_constraint=None,
                 beta_constraint=None,
                 **kwargs):
        """Layer normalization layer.

        See: [Layer Normalization](https://arxiv.org/pdf/1607.06450.pdf)

        :param center: Add an offset parameter if it is True.
        :param scale: Add a scale parameter if it is True.
        :param epsilon: Epsilon for calculating variance.
        :param gamma_initializer: Initializer for the gamma weight.
        :param beta_initializer: Initializer for the beta weight.
        :param gamma_regularizer: Optional regularizer for the gamma weight.
        :param beta_regularizer: Optional regularizer for the beta weight.
        :param gamma_constraint: Optional constraint for the gamma weight.
        :param beta_constraint: Optional constraint for the beta weight.
        :param kwargs: Extra keyword arguments passed to the base Layer.
        """
        super(LayerNormalization, self).__init__(**kwargs)
        self.supports_masking = True
        self.center = center
        self.scale = scale
        if epsilon is None:
            epsilon = K.epsilon() * K.epsilon()
        self.epsilon = epsilon
        self.gamma_initializer = keras.initializers.get(gamma_initializer)
        self.beta_initializer = keras.initializers.get(beta_initializer)
        self.gamma_regularizer = keras.regularizers.get(gamma_regularizer)
        self.beta_regularizer = keras.regularizers.get(beta_regularizer)
        self.gamma_constraint = keras.constraints.get(gamma_constraint)
        self.beta_constraint = keras.constraints.get(beta_constraint)
        self.gamma, self.beta = None, None

    def get_config(self):
        config = {
            'center': self.center,
            'scale': self.scale,
            'epsilon': self.epsilon,
            'gamma_initializer': keras.initializers.serialize(self.gamma_initializer),
            'beta_initializer': keras.initializers.serialize(self.beta_initializer),
            'gamma_regularizer': keras.regularizers.serialize(self.gamma_regularizer),
            'beta_regularizer': keras.regularizers.serialize(self.beta_regularizer),
            'gamma_constraint': keras.constraints.serialize(self.gamma_constraint),
            'beta_constraint': keras.constraints.serialize(self.beta_constraint),
        }
        base_config = super(LayerNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def compute_output_shape(self, input_shape):
        return input_shape

    def compute_mask(self, inputs, input_mask=None):
        return input_mask

    def build(self, input_shape):
        shape = input_shape[-1:]
        if self.scale:
            self.gamma = self.add_weight(
                shape=shape,
                initializer=self.gamma_initializer,
                regularizer=self.gamma_regularizer,
                constraint=self.gamma_constraint,
                name='gamma',
            )
        if self.center:
            self.beta = self.add_weight(
                shape=shape,
                initializer=self.beta_initializer,
                regularizer=self.beta_regularizer,
                constraint=self.beta_constraint,
                name='beta',
            )
        super(LayerNormalization, self).build(input_shape)

    def call(self, inputs, training=None):
        mean = K.mean(inputs, axis=-1, keepdims=True)
        variance = K.mean(K.square(inputs - mean), axis=-1, keepdims=True)
        std = K.sqrt(variance + self.epsilon)
        outputs = (inputs - mean) / std
        if self.scale:
            outputs *= self.gamma
        if self.center:
            outputs += self.beta
        return outputs
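# The helper below is an added, illustrative sanity check (not part of the
# original gist): it shows the intended effect of LayerNormalization, namely
# that each feature vector along the last axis comes out with roughly zero
# mean and unit variance. The shapes (4, 10, 8) are arbitrary assumptions.
def _layer_norm_sanity_check():
    inp = Input((10, 8))
    ln_model = Model(inp, LayerNormalization()(inp))
    y = ln_model.predict(np.random.randn(4, 10, 8))
    print(y.mean(axis=-1).round(3))  # ~0 everywhere
    print(y.std(axis=-1).round(3))   # ~1 everywhere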
class ScaleLayer(Layer):
    """Piecewise rescaling onto a fixed grid of 199 points (-99..99): the first
    input column shifts the grid centre (added to the module-level constant
    ``y_mean``), and the second/third columns set the left/right scale via
    exp(0.5 * value).

    Note: ``y_mean`` is NOT defined in this snippet; it must exist at module
    level before the layer is called.
    """

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(ScaleLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        super(ScaleLayer, self).build(input_shape)

    def call(self, x):
        xx = K.arange(-99, 100, dtype=tf.float32)
        mu = y_mean + tf.reshape(x[:, 0], (-1, 1))
        sigma_minus = tf.identity(K.exp(0.5 * tf.reshape(x[:, 1], (-1, 1))), name="sigma_minus")
        sigma_plus = tf.identity(K.exp(0.5 * tf.reshape(x[:, 2], (-1, 1))), name="sigma_plus")
        xx = tf.subtract(xx, mu)
        pcf = tf.where(xx >= 0, tf.divide(xx, sigma_plus), tf.divide(xx, sigma_minus))
        return pcf

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)
def edge_attention(adj, nodes, dropout):
    # Edge-conditioned attention: learn a residual refinement of the adjacency
    # matrix with 1x1 convolutions, then use it to aggregate node features.
    n_nodes = int(adj.shape[1])
    dist1 = Reshape((n_nodes, n_nodes, 1))(adj)
    dist1 = Conv2D(16, 1, activation='relu', kernel_initializer='glorot_uniform', bias_initializer='glorot_uniform')(dist1)
    dist1 = Conv2D(1, 1, activation='relu', kernel_initializer='glorot_uniform', bias_initializer='glorot_uniform')(dist1)
    dist1 = Reshape((n_nodes, n_nodes))(dist1)
    adj = Add()([adj, dist1])
    adj = LayerNormalization()(adj)
    att = Lambda(lambda c: K.batch_dot(c[0], c[1]))([adj, nodes])
    x_player = Add()([nodes, att])
    x_player = LayerNormalization()(x_player)
    if dropout > 0:
        x_player = Dropout(dropout)(x_player)
    return x_player
def attention(x_inner, x_outer, n_factor, dropout):
    # Scaled dot-product attention: queries come from x_inner, keys/values from
    # x_outer. Note that the dropout argument is currently unused here; dropout
    # is applied by the callers instead.
    x_Q = Conv1D(n_factor, 1, activation='linear', kernel_initializer='glorot_uniform', bias_initializer='glorot_uniform')(x_inner)
    x_K = Conv1D(n_factor, 1, activation='linear', kernel_initializer='glorot_uniform', bias_initializer='glorot_uniform')(x_outer)
    x_V = Conv1D(n_factor, 1, activation='linear', kernel_initializer='glorot_uniform', bias_initializer='glorot_uniform')(x_outer)
    x_KT = Permute((2, 1))(x_K)
    res = Lambda(lambda c: K.batch_dot(c[0], c[1]) / np.sqrt(n_factor))([x_Q, x_KT])
    att = Lambda(lambda c: K.softmax(c, axis=-1))(res)
    att = Lambda(lambda c: K.batch_dot(c[0], c[1]))([att, x_V])
    return att
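# The helper below is an added, illustrative shape check (not part of the
# original gist): attention maps queries of shape (batch, n_q, f) against
# keys/values of shape (batch, n_kv, f) to an output of shape
# (batch, n_q, n_factor). The sizes 7, 9, 5 and n_factor=8 are arbitrary
# assumptions.
def _attention_shape_check():
    q_in = Input((7, 5))
    kv_in = Input((9, 5))
    att_out = attention(q_in, kv_in, n_factor=8, dropout=0.0)
    print(K.int_shape(att_out))  # expected: (None, 7, 8)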
def multi_head_self_attention(x, n_factor, n_head, dropout):
    if n_head == 1:
        att = attention(x, x, n_factor, dropout)
    else:
        n_factor_head = n_factor // n_head
        heads = [attention(x, x, n_factor_head, dropout) for i in range(n_head)]
        att = Concatenate()(heads)
        att = Dense(n_factor,
                    kernel_initializer='glorot_uniform',
                    bias_initializer='glorot_uniform',
                    )(att)
    x = Add()([x, att])
    x = LayerNormalization()(x)
    if dropout > 0:
        x = Dropout(dropout)(x)
    return x
def multi_head_outer_attention(x_inner, x_outer, n_factor, n_head, dropout):
    if n_head == 1:
        att = attention(x_inner, x_outer, n_factor, dropout)
    else:
        n_factor_head = n_factor // n_head
        heads = [attention(x_inner, x_outer, n_factor_head, dropout) for i in range(n_head)]
        att = Concatenate()(heads)
        att = Dense(n_factor,
                    kernel_initializer='glorot_uniform',
                    bias_initializer='glorot_uniform',
                    )(att)
    x_inner = Add()([x_inner, att])
    x_inner = LayerNormalization()(x_inner)
    # Apply dropout only when requested; the original returned an undefined
    # variable when dropout == 0.
    if dropout > 0:
        x_inner = Dropout(dropout)(x_inner)
    return x_inner
def se_block(in_block, ch, ratio):
    # Squeeze-and-excitation: global pooling -> bottleneck -> sigmoid gate,
    # applied channel-wise, followed by a residual connection.
    x = GlobalAveragePooling1D()(in_block)
    x = Dense(ch // ratio, activation='relu')(x)
    x = Dense(ch, activation='sigmoid')(x)
    x = Multiply()([in_block, x])
    return Add()([x, in_block])
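# The helper below is an added, illustrative shape check (not part of the
# original gist): se_block keeps its input shape while gating channels and
# adding a residual connection. The sizes (7 positions, 16 channels, ratio 4)
# are arbitrary assumptions.
def _se_block_shape_check():
    x_in = Input((7, 16))
    out = se_block(x_in, ch=16, ratio=4)
    print(K.int_shape(out))  # expected: (None, 7, 16)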
def conv_block(nodes, n_factor, n_hidden, se_ratio, dropout):
    players0 = nodes
    nodes = Conv1D(n_hidden, 1, activation='relu', kernel_initializer='glorot_uniform', bias_initializer='glorot_uniform')(nodes)
    nodes = Conv1D(n_factor, 1, activation='relu', kernel_initializer='glorot_uniform', bias_initializer='glorot_uniform')(nodes)
    nodes = Add()([players0, nodes])
    nodes = se_block(nodes, n_factor, se_ratio)
    nodes = LayerNormalization()(nodes)
    if dropout > 0:
        nodes = Dropout(dropout)(nodes)
    return nodes
def graph_destroyer(n_nodes, n_feats, n_graph_feats, n_factor, n_loops, n_head, n_hidden, se_ratio, dropout, output_size):
    input_nodes = Input((n_nodes, n_feats), name="main_categ")
    input_edges = Input((n_nodes, n_nodes), name="inv_dist")
    input_graphs = Input((n_graph_feats,), name="graphs")
    x_node = input_nodes
    x_node = Conv1D(n_factor, 1)(x_node)
    x_node = LayerNormalization()(x_node)
    for l in range(n_loops):
        x_node = edge_attention(input_edges, x_node, dropout)
        x_node = conv_block(x_node, n_factor, n_hidden, se_ratio, dropout)
        x_node = multi_head_self_attention(x_node, n_factor, n_head, dropout)
        x_node = conv_block(x_node, n_factor, n_hidden, se_ratio, dropout)
    # Read out a graph-level vector by attending from the graph features over the nodes.
    x_graphs = Dense(n_factor)(input_graphs)
    x_graphs = Reshape((1, -1))(x_graphs)
    readout = multi_head_outer_attention(x_graphs, x_node, n_factor, n_head, dropout)
    readout = Flatten()(readout)
    # Two heads: a direct 199-way sigmoid output, and a ScaleLayer output on the
    # same 199-point grid defined in ScaleLayer.call (K.arange(-99, 100)).
    out1 = Dense(199, activation='sigmoid')(readout)
    readout = Dense(output_size)(readout)
    readout = ScaleLayer(output_dim=199)(readout)
    out2 = Activation('sigmoid')(readout)
    return Model(inputs=[input_nodes, input_edges, input_graphs], outputs=[out1, out2])
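# Usage sketch (added; not part of the original gist). The hyperparameters and
# the module-level `y_mean` below are placeholder assumptions chosen only so
# that the model builds; the original snippet does not specify them.
if __name__ == '__main__':
    y_mean = 0.0  # placeholder for the global read by ScaleLayer.call
    model = graph_destroyer(n_nodes=22, n_feats=10, n_graph_feats=5,
                            n_factor=64, n_loops=2, n_head=4, n_hidden=128,
                            se_ratio=4, dropout=0.1, output_size=3)
    model.compile(optimizer=Adam(1e-3),
                  loss=['binary_crossentropy', 'binary_crossentropy'])
    model.summary()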