GitHub gists by Theodoros Ntakouris (ntakouris)
def lr_scheduler(epoch, lr, warmup_epochs=15, decay_epochs=100, initial_lr=1e-6, base_lr=1e-3, min_lr=5e-5):
    # Linear warmup from initial_lr up to base_lr over the first warmup_epochs.
    if epoch <= warmup_epochs:
        pct = epoch / warmup_epochs
        return ((base_lr - initial_lr) * pct) + initial_lr
    # Linear decay from base_lr down to min_lr over the next decay_epochs.
    if epoch > warmup_epochs and epoch < warmup_epochs + decay_epochs:
        pct = 1 - ((epoch - warmup_epochs) / decay_epochs)
        return ((base_lr - min_lr) * pct) + min_lr
    # Hold min_lr for the remainder of training.
    return min_lr
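A quick way to sanity-check the schedule and plug it into training; the model and epoch count below are placeholders, not part of the original gist:

import tensorflow as tf

# Wrap the schedule in a Keras callback; the schedule receives (epoch, lr).
lr_callback = tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1)

# Spot-check a few points of the schedule:
# epoch 0 -> 1e-6 (initial_lr), epoch 15 -> 1e-3 (base_lr),
# epoch 115 and beyond -> 5e-5 (min_lr).
for epoch in (0, 15, 65, 115, 200):
    print(epoch, lr_scheduler(epoch, lr=0.0))

# model.fit(x_train, y_train, epochs=150, callbacks=[lr_callback])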
from tensorflow import keras

class ModelTrunk(keras.Model):
    def __init__(self, name='ModelTrunk', time2vec_dim=1, num_heads=2, head_size=128, ff_dim=None, num_layers=1, dropout=0, **kwargs):
        super().__init__(name=name, **kwargs)
        # Learned time embedding (see the Time2Vec layer below).
        self.time2vec = Time2Vec(kernel_size=time2vec_dim)
        if ff_dim is None:
            ff_dim = head_size
        self.dropout = dropout
        # Stack of self-attention blocks (see AttentionBlock below).
        self.attention_layers = [AttentionBlock(num_heads=num_heads, head_size=head_size, ff_dim=ff_dim, dropout=dropout) for _ in range(num_layers)]
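    def call(self, inputs):
        # Sketch only - the gist stops after __init__, so this forward pass is
        # an assumption, not the original code. ASSUMPTION: inputs has shape
        # (batch, time_steps, features); the Time2Vec embedding of each timestep
        # is appended to its features, then the result runs through the stack.
        time_embedding = keras.layers.TimeDistributed(self.time2vec)(inputs)
        x = keras.layers.Concatenate(axis=-1)([inputs, time_embedding])
        for attention_layer in self.attention_layers:
            x = attention_layer(x)
        return x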
from tensorflow import keras
from tensorflow_addons.layers import MultiHeadAttention

class AttentionBlock(keras.Model):
    def __init__(self, name='AttentionBlock', num_heads=2, head_size=128, ff_dim=None, dropout=0, **kwargs):
        super().__init__(name=name, **kwargs)
        if ff_dim is None:
            ff_dim = head_size
        # Multi-head self-attention from TensorFlow Addons.
        self.attention = MultiHeadAttention(num_heads=num_heads, head_size=head_size, dropout=dropout)
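        # Sketch only - the gist is truncated here. The layer norm and call()
        # below are an assumed, standard way to wire the attention layer with a
        # residual connection; they are not part of the original snippet, and a
        # feed-forward sub-block using ff_dim is omitted.
        self.attention_norm = keras.layers.LayerNormalization(epsilon=1e-6)

    def call(self, inputs):
        # tfa's MultiHeadAttention takes [query, key] (value defaults to key);
        # for self-attention the output keeps the input's feature dimension,
        # so the residual connection is shape-safe.
        x = self.attention([inputs, inputs])
        return self.attention_norm(inputs + x)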
class Time2Vec(keras.layers.Layer):
    def __init__(self, kernel_size=1):
        super(Time2Vec, self).__init__(trainable=True, name='Time2VecLayer')
        # Number of periodic (sinusoidal) components per input element.
        self.k = kernel_size

    def build(self, input_shape):
        # trend: linear component, one weight and bias per input element
        self.wb = self.add_weight(name='wb', shape=(input_shape[1],), initializer='uniform', trainable=True)
        self.bb = self.add_weight(name='bb', shape=(input_shape[1],), initializer='uniform', trainable=True)
        # periodic
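        # (Sketch only - the gist is truncated here. The periodic weights and
        # the forward pass below follow the Time2Vec formulation, sin(w*t + b)
        # with k sinusoidal components next to the linear trend term; they are
        # an assumption, not the original code. Assumes `import tensorflow as tf`.)
        self.wa = self.add_weight(name='wa', shape=(input_shape[1], self.k), initializer='uniform', trainable=True)
        self.ba = self.add_weight(name='ba', shape=(input_shape[1], self.k), initializer='uniform', trainable=True)

    def call(self, inputs):
        # ASSUMED input shape: (batch, n) with n = input_shape[1] from build().
        trend = self.wb * inputs + self.bb                         # (batch, n)
        periodic = tf.sin(inputs[..., None] * self.wa + self.ba)   # (batch, n, k)
        # Stack the trend next to the k periodic components and flatten to
        # (batch, n * (k + 1)) so the embedding can be concatenated downstream.
        out = tf.concat([trend[..., None], periodic], axis=-1)
        return tf.reshape(out, (-1, inputs.shape[1] * (self.k + 1)))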
@staticmethod
def gh_jacobian(y, x):
    # x must be a vector
    # y must be a matrix with dimensions [batch_size, n], n > 1; if n == 1 use the _vec version
    J = tf.map_fn(lambda m: tf.gradients(y[:, m:m+1], x)[0], tf.range(tf.shape(y)[-1]), tf.float32)
    # J = tf.Print(J, [tf.shape(J)], "J shape = ", summarize=-1)
    J = tf.transpose(tf.squeeze(J), perm=[1, 0, 2])
    return J
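A minimal graph-mode usage sketch (TF1-style, since tf.gradients only works outside eager execution); the toy linear model and all names here are illustrative, not part of the gist:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.placeholder(tf.float32, [None, 3], name='x')
W = tf.constant([[1.0, 0.0], [0.0, 2.0], [3.0, 0.0]])
y = tf.matmul(x, W)    # shape (batch_size, 2), so n > 1

J = gh_jacobian(y, x)  # in the gist this is a @staticmethod, so call it via its class

with tf.Session() as sess:
    # Use two rows in the batch so tf.squeeze() inside gh_jacobian is a no-op.
    print(sess.run(J, feed_dict={x: [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]}))
    # Each (2, 3) slice is W transposed: [[1, 0, 3], [0, 2, 0]].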
# Load a frozen TF1 GraphDef from disk, then build a fresh graph with a
# placeholder for the generator's latent input.
with tf.gfile.GFile(modelfilename, 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    self.graph_def = graph_def

with tf.Graph().as_default() as g1:
    # gi_name = model_name + '/' + gen_input
    gi = tf.placeholder(tf.float32,
                        [None, self.z_dim],
                        name='g1_gi')
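    # Sketch (not part of the original gist): bind the frozen graph's generator
    # input to the placeholder above when importing it into g1. The tensor name
    # 'generator_input:0' is an illustrative placeholder.
    tf.import_graph_def(self.graph_def,
                        input_map={'generator_input:0': gi},
                        name='g1_import')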
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
docker run -d -v $DIR/grafana:/var/lib/grafana --name=grafana -p 3000:3000 grafana/grafana
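A quick check that the container came up and Grafana answers on the mapped port; these commands are only a sanity check, not part of the original script:

docker ps --filter name=grafana   # the container should be listed as Up
curl -sL -o /dev/null -w '%{http_code}\n' http://localhost:3000   # expect 200 once Grafana has started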
| ID | Text | Sentiment |
|----|------|-----------|
| N1 | I've had an IPhone for like 3 years and I've never done that :( hate ma life | negative |
| N2 | I dont need the new iphone but I want it :( | negative |
| N3 | fuck the iPhone 6s cus I'm not getting one :( | negative |
| N4 | Hopefully will be able to get my iPhone 6s today :( Plane gets in at like 11:30 :/ | negative |
| N5 | @clydesdalebank I've just bought a new iPhone and I can't use Apple Pay because you don't support it. When will I get to use it? :( | negative |
| N6 | The new iphone is out today but mine doesn't come til next week :( </3 | negative |
| N7 | Also have to work an iPhone launch at t-mobile :( | negative |
from typing import Dict, Text
import tensorflow as tf
from absl import logging
from tensorflow.keras.layers import (LSTM, Activation, Concatenate, Dense)
import kerastuner
from rnn.constants import (INPUT_FEATURE_KEYS, PREDICT_FEATURE_KEYS,
                           HP_HIDDEN_LATENT_DIM,
                           HP_HIDDEN_LAYER_NUM, HP_LR,
from typing import Tuple, Text, Dict
import tensorflow as tf
from tensorflow.keras.layers import Input, Concatenate, Reshape, Dense
from input_fn_utils import transformed_name
def get_input_graph(input_feature_keys, input_window_size) -> Tuple[Input, tf.keras.layers.Layer]:
"""
Creates the named input layers, strips the column names and provides
them as a plain tensor.
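    (The gist is cut off here; the body below is a hypothetical sketch, not the
    original implementation.)
    """
    # Sketch assumptions: one scalar feature per key per timestep, and input
    # names produced by transformed_name() as in the accompanying TFX code.
    inputs = [
        Input(shape=(input_window_size, 1), name=transformed_name(key))
        for key in input_feature_keys
    ]
    # Strip the per-column names by concatenating everything into a single
    # (batch, input_window_size, len(input_feature_keys)) tensor.
    concatenated = Concatenate(axis=-1)(inputs)
    return inputs, concatenated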