Hamel Husain (hamelsmu)
Always learning.
hamelsmu / atom_settings
Created December 18, 2016 19:58

test
# reference: https://hub.docker.com/_/ubuntu/
FROM ubuntu:16.04

# Add metadata to the image as key-value pairs, e.g. LABEL version="1.0"
LABEL maintainer="Hamel Husain <[email protected]>"

# Set environment variables
ENV LANG=C.UTF-8 LC_ALL=C.UTF-8

RUN apt-get update --fix-missing && apt-get install -y wget bzip2 ca-certificates
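As a usage note, an image built from a Dockerfile like this is created with 'docker build -t some-tag .' in the directory containing it and started with 'docker run -it some-tag' (the tag name here is only an example).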
SELECT
  url as issue_url
  -- replace more than one white-space character in a row with a single space
  , REGEXP_REPLACE(title, r"\s{2,}", ' ') as issue_title
  , REGEXP_REPLACE(body, r"\s{2,}", ' ') as body
FROM (
  SELECT
    JSON_EXTRACT(payload, '$.issue.html_url') as url
    -- extract the title and body removing parentheses, brackets, and quotes
    , REGEXP_REPLACE(JSON_EXTRACT(payload, '$.issue.title'), r'\(|\)|\[|\]|"', ' ') as title
    , REGEXP_REPLACE(JSON_EXTRACT(payload, '$.issue.body'), r'\(|\)|\[|\]|"', ' ') as body
  -- assumed source table: the public GitHub Archive dataset on BigQuery
  FROM `githubarchive.day.20170101`
  WHERE type = 'IssuesEvent'
)
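One way to execute the query and hand the results to the pre-processing code below, assuming BigQuery credentials are already configured. The project id is a placeholder, and the variable query is assumed to hold the SQL string above.

from google.cloud import bigquery

client = bigquery.Client(project='my-gcp-project')  # placeholder project id
df = client.query(query).to_dataframe()             # query = the SQL string above

# raw text lists consumed by the pre-processing step below
train_body_raw = df.body.tolist()
train_title_raw = df.issue_title.tolist()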
from ktext.preprocess import processor

# instantiate data processing object for issue bodies
body_pp = processor(keep_n=8000, padding_maxlen=70)

# process data
train_body_vecs = body_pp.fit_transform(train_body_raw)

# instantiate the pre-processor for titles
title_pp = processor(append_indicators=True, keep_n=4500,
                     padding_maxlen=12, padding='post')

# process the titles
train_title_vecs = title_pp.fit_transform(train_title_raw)
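Not shown in the snippet, but a natural next step: persist the fitted pre-processors and the vectorized arrays so later steps can skip re-processing. dill is one option for serializing the processor objects; the file names are illustrative.

import dill as dpickle
import numpy as np

# Save the fitted pre-processors...
with open('body_pp.dpkl', 'wb') as f:
    dpickle.dump(body_pp, f)
with open('title_pp.dpkl', 'wb') as f:
    dpickle.dump(title_pp, f)

# ...and the vectorized documents.
np.save('train_body_vecs.npy', train_body_vecs)
np.save('train_title_vecs.npy', train_title_vecs)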
from keras.models import Model
from keras.layers import Input, LSTM, GRU, Dense, Embedding, Bidirectional, BatchNormalization
from keras import optimizers

# arbitrarily set latent dimension for embedding and hidden units
latent_dim = 300

##### Define Model Architecture #####
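The model definition itself is not in the snippet; what follows is a minimal sketch of an encoder-decoder wiring that is consistent with the imports above and with the build_seq2seq_model docstring further down (one GRU layer each for encoder and decoder). The layer names, sequence length, and vocabulary sizes are illustrative assumptions, not the original code.

# Illustrative shapes: sequence length from padding_maxlen above; vocabulary
# sizes assume keep_n plus a couple of special tokens (an assumption).
doc_length = 70
num_encoder_tokens, num_decoder_tokens = 8002, 4502

# Encoder: embed the issue body and compress it into a single state vector.
encoder_inputs = Input(shape=(doc_length,), name='Encoder-Input')
enc = Embedding(num_encoder_tokens, latent_dim, name='Body-Word-Embedding')(encoder_inputs)
enc = BatchNormalization(name='Encoder-Batchnorm')(enc)
_, state_h = GRU(latent_dim, return_state=True, name='Encoder-GRU')(enc)

# Decoder: embed the title-so-far and predict the next token at every step,
# seeded with the encoder's final state.
decoder_inputs = Input(shape=(None,), name='Decoder-Input')
dec = Embedding(num_decoder_tokens, latent_dim, name='Decoder-Word-Embedding')(decoder_inputs)
dec = BatchNormalization(name='Decoder-Batchnorm-1')(dec)
dec, _ = GRU(latent_dim, return_state=True, return_sequences=True,
             name='Decoder-GRU')(dec, initial_state=state_h)
dec = BatchNormalization(name='Decoder-Batchnorm-2')(dec)
decoder_outputs = Dense(num_decoder_tokens, activation='softmax', name='Final-Output-Dense')(dec)

seq2seq_Model = Model([encoder_inputs, decoder_inputs], decoder_outputs)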
from keras.callbacks import CSVLogger, ModelCheckpoint

# set up callbacks for model logging and checkpointing
script_name_base = 'tutorial_seq2seq'
csv_logger = CSVLogger('{:}.log'.format(script_name_base))
model_checkpoint = ModelCheckpoint('{:}.epoch{{epoch:02d}}-val{{val_loss:.5f}}.hdf5'.format(script_name_base),
                                   save_best_only=True)

# arguments to pass to model.fit
batch_size = 1200
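A sketch of how these pieces plug into training. The teacher-forcing split of the title vectors follows the standard seq2seq recipe; the optimizer settings, epoch count, and validation split are illustrative assumptions, not the original call.

import numpy as np

# Teacher forcing: the decoder is fed the title shifted right by one token
# and trained to predict the next token at each position.
decoder_input_data = train_title_vecs[:, :-1]
decoder_target_data = np.expand_dims(train_title_vecs[:, 1:], -1)

seq2seq_Model.compile(optimizer=optimizers.Nadam(lr=0.00125),
                      loss='sparse_categorical_crossentropy')

history = seq2seq_Model.fit([train_body_vecs, decoder_input_data], decoder_target_data,
                            batch_size=batch_size,
                            epochs=7,
                            validation_split=0.12,
                            callbacks=[csv_logger, model_checkpoint])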
def extract_decoder_model(model):
    """
    Extract the decoder from the original model.

    Inputs:
    ------
    model: keras model object

    Returns:
    -------
    decoder_model: keras model object
    """
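Pulling the decoder out matters for inference: at prediction time the title is generated one token at a time, feeding each predicted token and the updated recurrent state back into the decoder, which requires the decoder as a standalone model rather than the combined training graph.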
def build_seq2seq_model(word_emb_dim,
                        hidden_state_dim,
                        encoder_seq_len,
                        num_encoder_tokens,
                        num_decoder_tokens):
    """
    Builds the architecture for a sequence-to-sequence model.

    The encoder and decoder consist of one GRU layer each. The user
    can specify the dimensionality of the word embedding and the hidden state.
    """
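For illustration, a call wired to the pre-processing settings earlier on the page; the exact vocabulary sizes and sequence length are assumptions derived from keep_n and padding_maxlen above.

seq2seq_Model = build_seq2seq_model(word_emb_dim=latent_dim,
                                    hidden_state_dim=latent_dim,
                                    encoder_seq_len=70,
                                    num_encoder_tokens=8002,
                                    num_decoder_tokens=4502)
seq2seq_Model.summary()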
import click
import crayons
from shutil import rmtree

# 'project' is assumed to be a module-level Project object (this reads like an
# excerpt from pipenv, where project.virtualenv_location is the venv path).
def cleanup_virtualenv(bare=True):
    if not bare:
        click.echo(crayons.red('Environment creation aborted.'))
    try:
        # Delete the virtualenv.
        rmtree(project.virtualenv_location)
    except OSError as e:
        click.echo(
            '{0} An error occurred while removing {1}!'.format(
                crayons.red('Error: ', bold=True), e
            )
        )
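Worth noting about the pattern above: rmtree can raise OSError for read-only files or files still held open, so catching the error and echoing a colored message keeps the CLI from spilling a raw traceback at the user.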