Isaac Godfried (isaacmg)
""" Use torchMoji to predict emojis from a single text input
"""
from __future__ import print_function, division, unicode_literals
import examples.example_helper
import json
import csv
import argparse
import numpy as np
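The preview cuts off after the imports. Continuing from them, a minimal sketch of how the torchMoji API is typically driven to score one sentence (the maxlen of 30 and the example text are assumptions, not from the gist):

from torchmoji.sentence_tokenizer import SentenceTokenizer
from torchmoji.model_def import torchmoji_emojis
from torchmoji.global_variables import PRETRAINED_PATH, VOCAB_PATH

# Build a tokenizer from the pretrained vocabulary (maxlen=30 is an assumption)
with open(VOCAB_PATH, 'r') as f:
    vocabulary = json.load(f)
st = SentenceTokenizer(vocabulary, 30)

# Load the pretrained emoji classifier and score a single text
model = torchmoji_emojis(PRETRAINED_PATH)
tokenized, _, _ = st.tokenize_sentences(["Testing torchMoji"])
prob = model(tokenized)[0]
top_emoji_ids = np.argsort(prob)[::-1][:5]  # indices of the 5 most likely emojis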
isaacmg / commands.md

Serving a TTS model with Seldon

In this walkthrough, you will learn to serve a multi-part PyTorch model with Seldon.

Build the encoder image with s2i:

s2i build . seldonio/seldon-core-s2i-python3:0.10 kubeflow/tts_encoder

Run the container locally to test it:

docker run --name "EncoderServe" --rm -p 5000:5000 kubeflow/tts_encoder
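s2i wraps a Python class whose predict() method Seldon calls on each request. A minimal sketch of what the encoder wrapper might look like (the class name, weights file, and tensor handling are assumptions; only the predict() hook is Seldon's convention):

import torch

class TTSEncoder:
    def __init__(self):
        # Load the serialized encoder once at container start-up (path is an assumption)
        self.model = torch.jit.load("encoder.pt")
        self.model.eval()

    def predict(self, X, features_names=None):
        # Seldon passes the request payload as a numpy array
        with torch.no_grad():
            out = self.model(torch.from_numpy(X).float())
        return out.numpy()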

!allennlp train babi_train_meta.jsonnet -s /tmp/serialization_dir --include-package allennlp.training.metatrainer
import torch
from torch import nn

class AttendDiagnose(nn.Module):
    def __init__(self, number_measurements, filter_number):
        super().__init__()
        self.d_model = filter_number * number_measurements
        # 1x1 convolution embeds the raw measurement channels into d_model features
        self.embedding_conv = nn.Conv1d(number_measurements, filter_number * number_measurements, 1)
        self.pe = PositionalEncoding(filter_number * number_measurements)  # defined elsewhere in the gist
        # embed_dim and number of attention heads
        self.masked_attn = nn.MultiheadAttention(filter_number * number_measurements, 8)
        self.norm = nn.LayerNorm(self.d_model)
        self.final_layer = nn.Linear(self.d_model, 1)
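The preview ends before a forward() method; a sketch of a pass consistent with the layers above (the input layout, permutes, and residual connection are assumptions):

    def forward(self, x):
        # x: (batch, number_measurements, time_steps)
        embedded = self.embedding_conv(x)     # -> (batch, d_model, time_steps)
        embedded = embedded.permute(2, 0, 1)  # -> (time_steps, batch, d_model) for MultiheadAttention
        embedded = self.pe(embedded)
        attn_out, _ = self.masked_attn(embedded, embedded, embedded)
        out = self.norm(attn_out + embedded)  # residual connection, then LayerNorm
        return self.final_layer(out)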
import torch
from torch.nn import Transformer

class SimpleTransformer(torch.nn.Module):
    def __init__(self, n_time_series, seq_len, d_model=128):
        super().__init__()
        self.dense_shape = torch.nn.Linear(n_time_series, d_model)
        self.pe = SimplePositionalEncoding(d_model)
        self.transformer = Transformer(d_model, nhead=8)
        self.final_layer = torch.nn.Linear(d_model, 1)
        self.sequence_size = seq_len

    def forward(self, x, t, tgt_mask, src_mask=None):
        if src_mask is not None:
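The forward() body is truncated by the preview. A sketch of how such a model would typically be called, with a causal mask built by hand (batch size, sequence length, and feature count are arbitrary):

model = SimpleTransformer(n_time_series=3, seq_len=100)
src = torch.rand(100, 1, 3)  # (seq_len, batch, n_time_series)
tgt = torch.rand(100, 1, 3)
# Causal mask: position i may only attend to positions <= i
tgt_mask = torch.triu(torch.full((100, 100), float('-inf')), diagonal=1)
out = model(src, tgt, tgt_mask)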
# ML Core
scikit-learn==0.22.1
pandas==1.0.1
torch==1.4.0
tb-nightly==2.2.0
# Graphs and param management
seaborn==0.10.0
future==0.18.2
wandb==0.8.25
# GCP dependencies
@TrainerBase.register('metatrainer')
class MetaTrainer(Trainer):
    def __init__(self,
                 model: Model,
                 meta_model: MetaModel,
                 optimizer: torch.optim.Optimizer,
                 iterator: DataIterator,
                 train_datasets: List[Iterable[Instance]],
                 validation_datasets: Optional[Iterable[Instance]] = None,
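The --include-package flag in the allennlp command above is what makes this registration importable; the training config then selects the trainer by its registered name. A sketch of the relevant fragment of babi_train_meta.jsonnet (every other key omitted; only the 'metatrainer' name comes from the snippet):

{
  // ... dataset, model, and iterator keys omitted ...
  "trainer": {
    "type": "metatrainer"
  }
}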
isaacmg / Dockerfile
FROM pytorch/pytorch:1.4-cuda10.1-cudnn7-devel
COPY requirements.txt /tmp/
RUN pip install -r /tmp/requirements.txt
ARG url
RUN git clone -n https://github.com/example/example_repo
# The checkout must run inside the cloned repo, not the build root
WORKDIR example_repo
RUN git checkout 543231
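The url build argument is declared but never used in the visible lines, so presumably it parameterizes the clone in the full file. Under that assumption, the image would be built with something like (the tag is a placeholder):

docker build --build-arg url=https://github.com/example/example_repo -t example_image .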
def train_epoch_loop(data_loader: DataLoader, opt: torch.optim.Optimizer, model: PyTorchForecast,
                     takes_target: bool, forward_params={}):
    i = 0
    running_loss = 0.0
    for src, trg in data_loader:
        opt.zero_grad()
        # Move the batch to whatever device the model lives on (CPU/GPU/TPU)
        src = src.to(model.device)
        trg = trg.to(model.device)
        # TODO figure out how to avoid
        if takes_target:
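            # --- continuation sketch: the gist preview truncates above ---
            # Assumption: takes_target means the target sequence is also fed
            # to the model (teacher forcing); the "t" kwarg name is hypothetical.
            forward_params["t"] = trg
        output = model.model(src, **forward_params)  # .model: assumed underlying nn.Module
        loss = torch.nn.functional.mse_loss(output, trg)  # loss function is an assumption
        loss.backward()
        opt.step()
        running_loss += loss.item()
        i += 1
    return running_loss / max(i, 1)  # average loss over the epoch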
label = ade_eval
model_type = spert
model_path = data/models/ade
tokenizer_path = data/models/ade
dataset_path = data/datasets/conll04/conll04_test.json
types_path = data/datasets/conll04/conll04_types.json
eval_batch_size = 1
rel_filter_threshold = 0.4
size_embedding = 25
prop_drop = 0.1
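These keys match SpERT's evaluation configs; under that assumption, the file would be passed to the repo's CLI along the lines of (the config filename is a placeholder):

python ./spert.py eval --config configs/ade_eval.conf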