```js
var options = {
  title: 'Your session is about to expire',
  message: 'You will be logged out in <strong>{time}</strong> seconds. Do you want to stay logged in?',
  confirm_action: 'javascript:fnConfirmSessionExtend()'
};
MOM.modal.openSessionModal(options);
```
```python
from autobahn.twisted.websocket import WebSocketServerProtocol, WebSocketServerFactory, listenWS
from twisted.python import log
from autobahn.util import newid, utcnow
import Cookie
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
from chatterbot.conversation import Statement
from users import Users
import sys
```
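The imports above suggest a Twisted/autobahn WebSocket server that answers incoming messages with ChatterBot. Below is a minimal sketch of how these pieces could be wired together, reusing the imports shown above; the `ChatServerProtocol` class name, bot name, and port are illustrative assumptions rather than part of the original source, and the trainer call follows the ChatterBot 1.x API.

```python
from twisted.internet import reactor

# Assumed setup: train a bot on the bundled English corpus (ChatterBot 1.x API).
chatbot = ChatBot('ExampleBot')                       # illustrative bot name
trainer = ChatterBotCorpusTrainer(chatbot)
trainer.train('chatterbot.corpus.english')

class ChatServerProtocol(WebSocketServerProtocol):    # illustrative class name
    def onMessage(self, payload, isBinary):
        # Forward each text frame to the bot and send the reply back.
        if not isBinary:
            reply = chatbot.get_response(payload.decode('utf8'))
            self.sendMessage(str(reply).encode('utf8'), isBinary=False)

if __name__ == '__main__':
    log.startLogging(sys.stdout)
    factory = WebSocketServerFactory(u"ws://127.0.0.1:9000")  # illustrative port
    factory.protocol = ChatServerProtocol
    listenWS(factory)
    reactor.run()
```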
```js
function toNestedArray (data, rootLevel = 0, parentNode) {
  let output = []
  for (var i = 0; i < data.length; i++) {
    var count = data[i].count
    var items = data[i].name.split('/')
    var hasParent = items.length > rootLevel
    if (hasParent) {
      let parent = rootLevel
        ? items.length === rootLevel
          ? null
```
```python
from __future__ import print_function
import json
import os
import numpy as np
from gensim.models import Word2Vec
from gensim.utils import simple_preprocess
from keras.engine import Input
from keras.layers import Embedding, merge
from keras.layers.core import Layer
from keras import initializers, regularizers, constraints
from keras import backend as K

class Attention(Layer):
    def __init__(self,
                 kernel_regularizer=None, bias_regularizer=None,
                 kernel_constraint=None, bias_constraint=None,
                 use_bias=True, **kwargs):
        """
```
```python
from keras import backend as K
from keras.layers import Input, Dense, merge, Dropout, Lambda, LSTM, Masking
from keras.layers.core import Layer  # base class for AttentionLayer; not in the original excerpt's imports
from keras.models import Model, Sequential
from keras.optimizers import SGD, RMSprop, Adam, Nadam
from sys import argv
import argparse
import csv
import json
import numpy as np
import pickle

class AttentionLayer(Layer):
    '''
    Attention layer.
    Usage:
        lstm_layer = LSTM(dim, return_sequences=True)
        attention = AttentionLayer()(lstm_layer)
        sentenceEmb = merge([lstm_layer, attention], mode=lambda x: x[1]*x[0], output_shape=lambda x: x[0])
        sentenceEmb = Lambda(lambda x: K.sum(x, axis=1), output_shape=lambda x: (x[0], x[2]))(sentenceEmb)
    '''
    def __init__(self, init='glorot_uniform', kernel_regularizer=None, bias_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs):
```
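The usage block in the docstring above relies on the Keras 1 functional `merge(..., mode=...)` call, which no longer exists in Keras 2. A hedged sketch of the same pattern with `Lambda` layers follows; it assumes `AttentionLayer` produces one scalar weight per timestep with shape `(batch, steps)`, which the excerpt does not confirm, and the input sizes are illustrative.

```python
from keras import backend as K
from keras.layers import Input, LSTM, Lambda

dim = 64                                           # illustrative hidden size
x = Input(shape=(None, 300))                       # (batch, steps, features)
h = LSTM(dim, return_sequences=True)(x)            # per-timestep hidden states
a = AttentionLayer()(h)                            # assumed shape: (batch, steps)
a = Lambda(lambda t: K.expand_dims(t, axis=-1))(a)            # -> (batch, steps, 1)
weighted = Lambda(lambda t: t[0] * t[1])([h, a])              # scale each timestep
sentence_emb = Lambda(lambda t: K.sum(t, axis=1))(weighted)   # (batch, dim)
```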
```python
class AttentionWithContext(Layer):
    """
    Attention operation, with a context/query vector, for temporal data.
    Supports Masking.
    Follows the work of Yang et al. [https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf]
    "Hierarchical Attention Networks for Document Classification"
    by using a context vector to assist the attention.
    # Input shape
        3D tensor with shape: `(samples, steps, features)`.
    # Output shape
```
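The class body is cut off in the excerpt, so the following `call` method is only a sketch of the computation the docstring describes (Yang et al.'s context-vector attention): project each timestep, score it against a learned context vector, softmax over the steps, and return the weighted sum. `self.W`, `self.b`, and `self.u` are assumed to be trainable weights created in `build`, not names confirmed by the source.

```python
# Sketch of AttentionWithContext.call (assumed, not taken from the excerpt):
def call(self, x, mask=None):
    # x: (samples, steps, features)
    uit = K.tanh(K.dot(x, self.W) + self.b)                  # per-step projection
    ait = K.squeeze(K.dot(uit, K.expand_dims(self.u)), -1)   # score against context u
    a = K.exp(ait)
    if mask is not None:
        a *= K.cast(mask, K.floatx())                        # ignore padded steps
    a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
    weighted = x * K.expand_dims(a)                          # weight each timestep
    return K.sum(weighted, axis=1)                           # (samples, features)
```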
```js
{ test: require.resolve("react"), loader: "expose-loader?React" },
{ test: require.resolve("react-dom"), loader: "expose-loader?ReactDOM" },
{
  test: /redux\/es\/index.js/,
  use: [
    {
      loader: 'expose-loader',
      options: 'Redux'
    }
  ]
},
```
```python
class AttentionLSTM(LSTM):
    """LSTM with attention mechanism
    This is an LSTM incorporating an attention mechanism into its hidden states.
    Currently, the context vector calculated from the attended vector is fed
    into the model's internal states, closely following the model by Xu et al.
    (2016, Sec. 3.1.2), using a soft attention model following
    Bahdanau et al. (2014).
    The layer expects two inputs instead of the usual one:
```
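The excerpt stops before listing the two inputs. In layers of this kind they are typically the ordinary input sequence plus the external vector being attended over, so a usage sketch might look like the following; the call convention, names, and shapes are assumptions, since the full class is not shown here.

```python
from keras.layers import Input
from keras.models import Model

seq_in = Input(shape=(None, 300))            # usual time-distributed input sequence
attn_in = Input(shape=(128,))                # assumed: external vector to attend over
out = AttentionLSTM(64)([seq_in, attn_in])   # assumed two-input call convention
model = Model([seq_in, attn_in], out)
```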