@nigeljyng
nigeljyng / TemporalMaxPooling.py
Last active February 21, 2020 15:56
Temporal max pooling as implemented in https://arxiv.org/abs/1511.04108
from keras import backend as K
from keras.engine import InputSpec
from keras.engine.topology import Layer
import numpy as np

class TemporalMaxPooling(Layer):
    """
    This pooling layer accepts the temporal sequence output by a recurrent layer
    and performs temporal max pooling, looking at only the non-masked portion of
    the sequence, so that a variable-length sequence of hidden vectors is reduced
    to a single fixed-size vector.
    """
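The preview truncates the class body here. Below is a minimal sketch of how the masked max pooling can be completed, using standard Keras 2 backend ops; it follows the idea stated in the docstring, not necessarily the gist's exact code:

    def __init__(self, **kwargs):
        super(TemporalMaxPooling, self).__init__(**kwargs)
        self.supports_masking = True
        self.input_spec = InputSpec(ndim=3)

    def compute_output_shape(self, input_shape):
        # (batch, timesteps, features) -> (batch, features)
        return (input_shape[0], input_shape[2])

    def call(self, x, mask=None):
        if mask is None:
            return K.max(x, axis=1)
        # mask is (batch, timesteps); push masked timesteps to a large
        # negative value so they never win the max
        mask = K.cast(K.expand_dims(mask, axis=-1), K.floatx())
        return K.max(x * mask - (1.0 - mask) * 1e9, axis=1)

    def compute_mask(self, inputs, mask=None):
        # the output is one vector per sample, so no mask is propagated
        return None

Because supports_masking is True, the layer can sit directly after a masked recurrent layer such as LSTM(..., return_sequences=True).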

from gensim.models import KeyedVectors
import io

# Load gensim word2vec
w2v_path = '<Gensim File Path>'
w2v = KeyedVectors.load_word2vec_format(w2v_path)

# Vector file: `\t` separates the values within a vector, `\n` separates the words
@cbaziotis
cbaziotis / SelfAttention.py
Created April 21, 2018 17:31
SelfAttention implementation in PyTorch
import torch
import torch.nn as nn
from torch.nn import Parameter

class SelfAttention(nn.Module):
    def __init__(self, attention_size, batch_first=False, non_linearity="tanh"):
        super(SelfAttention, self).__init__()
        self.batch_first = batch_first
        # learnable scoring vector, one weight per hidden dimension
        self.attention_weights = Parameter(torch.FloatTensor(attention_size))
        self.softmax = nn.Softmax(dim=-1)
        if non_linearity == "relu":
            self.non_linearity = nn.ReLU()
        else:
            self.non_linearity = nn.Tanh()
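The preview ends inside __init__. A minimal sketch of the forward pass such a layer typically uses, scoring each timestep against the weight vector, softmaxing over time, then taking a weighted sum; this is an illustration, not the gist's exact code, and it omits the padding-mask handling a full implementation needs:

    def forward(self, inputs):
        # inputs: (batch, seq_len, hidden) when batch_first=True
        if not self.batch_first:
            inputs = inputs.transpose(0, 1)
        # one unnormalized attention score per timestep
        scores = self.non_linearity(inputs.matmul(self.attention_weights))  # (batch, seq_len)
        attentions = self.softmax(scores)  # normalize over timesteps
        # attention-weighted sum over time -> fixed-size representation
        weighted = inputs * attentions.unsqueeze(-1)
        representation = weighted.sum(dim=1)  # (batch, hidden)
        return representation, attentions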