Created October 3, 2017 10:06
A Keras Attention Layer for the DeepMoji model
from keras import backend as K
from keras import initializers
from keras.engine import InputSpec, Layer


class AttentionWeightedAverage(Layer):
    """
    Computes a weighted average of the different channels across timesteps.
    Uses 1 parameter per channel to compute the attention value for a single timestep.
    """

    def __init__(self, return_attention=False, **kwargs):
        self.init = initializers.get('uniform')
        self.supports_masking = True
        self.return_attention = return_attention
        super(AttentionWeightedAverage, self).__init__(**kwargs)

    def build(self, input_shape):
        # expects a 3D input: (batch_size, timesteps, channels)
        self.input_spec = [InputSpec(ndim=3)]
        assert len(input_shape) == 3

        # one attention parameter per channel
        self.W = self.add_weight(shape=(input_shape[2], 1),
                                 name='{}_W'.format(self.name),
                                 initializer=self.init)
        self.trainable_weights = [self.W]
        super(AttentionWeightedAverage, self).build(input_shape)

    def call(self, x, mask=None):
        # computes a probability distribution over the timesteps
        # uses the 'max trick' for numerical stability
        # reshape is done to avoid an issue with TensorFlow
        # and 1-dimensional weights
        logits = K.dot(x, self.W)
        x_shape = K.shape(x)
        logits = K.reshape(logits, (x_shape[0], x_shape[1]))
        ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True))

        # masked timesteps have zero weight
        if mask is not None:
            mask = K.cast(mask, K.floatx())
            ai = ai * mask

        # normalize to a distribution; the epsilon guards against division
        # by zero when every timestep of a sample is masked
        att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon())
        weighted_input = x * K.expand_dims(att_weights)
        result = K.sum(weighted_input, axis=1)
        if self.return_attention:
            return [result, att_weights]
        return result

    def get_output_shape_for(self, input_shape):
        return self.compute_output_shape(input_shape)

    def compute_output_shape(self, input_shape):
        output_len = input_shape[2]
        if self.return_attention:
            return [(input_shape[0], output_len),
                    (input_shape[0], input_shape[1])]
        return (input_shape[0], output_len)

    def compute_mask(self, input, input_mask=None):
        if isinstance(input_mask, list):
            return [None] * len(input_mask)
        else:
            return None
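For reference, here is a minimal sketch of how the layer might be wired into a sentence classifier. The vocabulary size, embedding size, LSTM size, and number of classes are made-up values for illustration, and the import paths assume Keras 2.x; `mask_zero=True` makes the Embedding layer emit the mask that AttentionWeightedAverage consumes.

from keras.models import Model
from keras.layers import Input, Embedding, Bidirectional, LSTM, Dense

tokens = Input(shape=(None,), dtype='int32')                      # (batch, timesteps)
x = Embedding(input_dim=10000, output_dim=128, mask_zero=True)(tokens)
x = Bidirectional(LSTM(64, return_sequences=True))(x)             # (batch, timesteps, 128)
x = AttentionWeightedAverage()(x)                                  # (batch, 128)
probs = Dense(2, activation='softmax')(x)
model = Model(inputs=tokens, outputs=probs)

Passing return_attention=True instead returns both the pooled vector and the per-timestep attention weights, which can be used to inspect which timesteps the model attended to.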
What is the use of this class? How is it useful in deep learning and RNN models, and what is actually happening here? Please elaborate.