In this walkthrough you will learn to serve a multi-part PyTorch model with Seldon.
s2i build . seldonio/seldon-core-s2i-python3:0.10 kubeflow/tts_encoder
docker run --name "EncoderServe" --rm -p 5000:5000 kubeflow/tts_encoder
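
The s2i builder wraps the model in Seldon's Python microservice. It expects a Python class in the build directory whose name matches the MODEL_NAME entry in the .s2i/environment file and which exposes a predict method. Below is a minimal sketch of such a wrapper; the class name TTSEncoder, the encoder.pth checkpoint, and the preprocessing are illustrative assumptions, not the actual project files.

# TTSEncoder.py -- minimal Seldon wrapper sketch; class name, checkpoint path,
# and preprocessing are illustrative assumptions.
import numpy as np
import torch

class TTSEncoder:
    def __init__(self):
        # Load the trained encoder once when the container starts.
        self.model = torch.load("encoder.pth", map_location="cpu")
        self.model.eval()

    def predict(self, X, features_names=None):
        # Seldon passes the request payload in as an array-like X.
        with torch.no_grad():
            tensor = torch.from_numpy(np.asarray(X, dtype=np.float32))
            output = self.model(tensor)
        return output.numpy()

The matching .s2i/environment file would then set MODEL_NAME=TTSEncoder, API_TYPE=REST, SERVICE_TYPE=MODEL, and PERSISTENCE=0 so the builder knows which class to serve. The builder also installs anything pinned in the directory's requirements.txt:
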
# ML Core
scikit-learn==0.22.1
pandas==1.0.1
torch==1.4.0
tb-nightly==2.2.0
# Graphs and param management
seaborn==0.10.0
future==0.18.2
wandb==0.8.25
# GCP dependencies
class SimpleTransformer(torch.nn.Module):
    def __init__(self, n_time_series, seq_len, d_model=128):
        super().__init__()
        self.dense_shape = torch.nn.Linear(n_time_series, d_model)
        self.pe = SimplePositionalEncoding(d_model)
        self.transformer = Transformer(d_model, nhead=8)
        self.final_layer = torch.nn.Linear(d_model, 1)
        self.sequence_size = seq_len

    def forward(self, x, t, tgt_mask, src_mask=None):
        if src_mask:
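
The listing refers to SimplePositionalEncoding, which is defined elsewhere in the project. A reasonable stand-in, assuming it is the standard sinusoidal positional encoding from the original Transformer paper (the real module may differ), looks like this:

import math
import torch

class SimplePositionalEncoding(torch.nn.Module):
    """Standard sinusoidal positional encoding (sketch; the real module may differ)."""

    def __init__(self, d_model: int, max_len: int = 5000):
        super().__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        self.register_buffer("pe", pe.unsqueeze(0))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Add the positional signal to (batch, seq_len, d_model) embeddings.
        return x + self.pe[:, : x.size(1)]
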
class AttendDiagnose(nn.Module):
    def __init__(self, number_measurements, filter_number):
        super().__init__()
        self.d_model = filter_number * number_measurements
        self.embedding_conv = nn.Conv1d(number_measurements, filter_number * number_measurements, 1)
        self.pe = PositionalEncoding(filter_number * number_measurements)
        # embed_dim and attention_heads
        self.masked_attn = nn.MultiheadAttention(filter_number * number_measurements, 8)
        self.norm = nn.LayerNorm(self.d_model)
        self.final_layer = nn.Linear(self.d_model, 1)
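
One detail worth calling out: nn.Conv1d consumes tensors shaped (batch, channels, seq_len), while nn.MultiheadAttention expects (seq_len, batch, embed_dim) by default, so the forward pass has to permute between the two layouts. Here is a small shape check with dummy vitals data; the sizes are made up for illustration.

import torch
import torch.nn as nn

# Dummy batch: 4 patients, 10 vital-sign measurements, 48 time steps.
number_measurements, filter_number, seq_len, batch = 10, 12, 48, 4
vitals = torch.randn(batch, number_measurements, seq_len)

embedding_conv = nn.Conv1d(number_measurements, filter_number * number_measurements, 1)
attn = nn.MultiheadAttention(filter_number * number_measurements, 8)

embedded = embedding_conv(vitals)       # (batch, d_model, seq_len)
embedded = embedded.permute(2, 0, 1)    # (seq_len, batch, d_model) for attention
attn_out, attn_weights = attn(embedded, embedded, embedded)  # self-attention
print(attn_out.shape)                   # torch.Size([48, 4, 120])
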
!allennlp train babi_train_meta.jsonnet -s /tmp/serialization_dir --include-package allennlp.training.metatrainer
""" Use torchMoji to predict emojis from a single text input | |
""" | |
from __future__ import print_function, division, unicode_literals | |
import examples.example_helper | |
import json | |
import csv | |
import argparse | |
import numpy as np |
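
The rest of the script, omitted above, tokenizes the text and runs the pretrained model. Below is a condensed sketch of those steps along the lines of the torchMoji example scripts; the maxlen value, the sample sentence, and the top-5 cut-off are illustrative.

import json
import numpy as np
from torchmoji.sentence_tokenizer import SentenceTokenizer
from torchmoji.model_def import torchmoji_emojis
from torchmoji.global_variables import PRETRAINED_PATH, VOCAB_PATH

maxlen = 30  # illustrative maximum number of tokens per sentence

with open(VOCAB_PATH, 'r') as f:
    vocabulary = json.load(f)

st = SentenceTokenizer(vocabulary, maxlen)
model = torchmoji_emojis(PRETRAINED_PATH)

# Tokenize a single sentence and score it; the model returns emoji probabilities.
tokenized, _, _ = st.tokenize_sentences(["I love how this model works!"])
prob = model(tokenized)[0]
top_emoji_ids = np.argsort(prob)[::-1][:5]  # indices of the five most likely emojis
print(top_emoji_ids)
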
import pandas as pd

for i, chunk in enumerate(pd.read_csv('bigfile.csv', chunksize=500000)):
    chunk.to_csv('chunk{}.csv'.format(i))
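
If you only need part of each 500,000-row chunk, filtering before writing keeps the intermediate files small. Here is a variant of the loop above; the column names are placeholders, so swap in whichever columns you actually need.

import pandas as pd

# Column names here are placeholders for whichever columns you actually need.
usecols = ["SUBJECT_ID", "CHARTTIME", "VALUENUM"]
for i, chunk in enumerate(pd.read_csv("bigfile.csv", chunksize=500000, usecols=usecols)):
    filtered = chunk.dropna(subset=["VALUENUM"])
    filtered.to_csv("chunk{}.csv".format(i), index=False)
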
import io
import zipfile

import lxml.html
import requests
from requests.auth import HTTPBasicAuth

user_name = "Replace this with your MIMIC username"
your_password = "Replace this with your MIMIC password"
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 '
                         '(KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
response = requests.get("https://physionet.org/works/MIMICIIIClinicalDatabase/files/",
                        auth=HTTPBasicAuth(user_name, your_password), headers=headers)
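
The lxml, zipfile, and io imports come into play next: parse the returned directory listing for file links, then pull each file down with the same credentials. The sketch below continues the snippet above; the link filter and the output directory are assumptions about the page layout.

# Continues from the request above: pick out file links from the HTML listing.
doc = lxml.html.fromstring(response.content)
links = [href for href in doc.xpath("//a/@href")
         if href.endswith(".csv.gz") or href.endswith(".zip")]

file_url = "https://physionet.org/works/MIMICIIIClinicalDatabase/files/" + links[0]
file_response = requests.get(file_url,
                             auth=HTTPBasicAuth(user_name, your_password),
                             headers=headers)

if links[0].endswith(".zip"):
    # Unpack zip archives in memory using the zipfile/io imports above.
    zipfile.ZipFile(io.BytesIO(file_response.content)).extractall("mimic_data")
else:
    with open(links[0], "wb") as f:
        f.write(file_response.content)
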
.followedBy("End").where(new IterativeCondition<Tuple2<String, Integer>>() { | |
@Override | |
public boolean filter(Tuple2<String, Integer> stringIntegerTuple2, Context<Tuple2<String, Integer>> context) throws Exception { | |
List<Tuple2<String,Integer>> s = Lists.newArrayList(context.getEventsForPattern("End")); | |
int i = s.size(); | |
int value = stringIntegerTuple2.getField(1); | |
int prevValue = s.get(i-1).getField(1); | |
return value>prevValue; | |
} | |
}); |
Pattern<Tuple2<String, Integer>, ?> pattern =
        Pattern.<Tuple2<String, Integer>>begin("first")
               .where(new SimpleCondition2(15))
               .followedBy("increasing")
               .where(new SimpleCondition2(20));

PatternStream<Tuple2<String, Integer>> patternStream =
        CEP.pattern(dataWindowKafka.keyBy(0), pattern);

DataStream<String> manyMentions = patternStream
        .select(new PatternSelectFunction<Tuple2<String, Integer>, String>() {
            @Override