from rasa_core_sdk import Action  # assumed: legacy Rasa Core SDK, matching the run() signature below

class ActionOrderProduct(Action):
    def name(self):
        return 'action_order_product'

    def run(self, dispatcher, tracker, domain):
        # read the product the user asked for from the 'router' slot
        router = tracker.get_slot('router')
        confirmationNumber = 123456  # later generate through some process
        response = """Your product {} is ordered for you. It will be shipped to your address. Your confirmation number is {}""".format(router, confirmationNumber)
        # send the confirmation back to the user; no extra events to return
        dispatcher.utter_message(response)
        return []

from rasa_nlu.training_data import load_data  # imports assume the standalone rasa_nlu package (pre-Rasa-1.0 API)
from rasa_nlu import config
from rasa_nlu.model import Trainer

def train_nlu(data, configuration, model_dir):
    # load the NLU training examples and train an interpreter model
    training_data = load_data(data)
    trainer = Trainer(config.load(configuration))
    trainer.train(training_data)
    # persist under a fixed name so the model can be reloaded later
    model_directory = trainer.persist(model_dir, fixed_model_name='customernlu')
    return model_directory
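
A minimal way to invoke this trainer might look like the following; the file paths are placeholders, not from the original snippet:

nlu_model_dir = train_nlu('./data/nlu_data.md', 'nlu_config.yml', './models/nlu')
print('NLU model saved to', nlu_model_dir)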

from rasa_core.agent import Agent  # imports assume the standalone rasa_core package (pre-Rasa-1.0 API)
from rasa_core.policies.memoization import MemoizationPolicy
from rasa_core.policies.keras_policy import KerasPolicy

def train_dialogue(domain_file='customer_domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='./data/stories.md'):
    # train the dialogue model with memoization and Keras policies
    agent = Agent(domain_file, policies=[MemoizationPolicy(), KerasPolicy()])
    agent.train(training_data_file,
                epochs=300,
                batch_size=50)
    # persist the trained dialogue model to model_path
    agent.persist(model_path)
    return agent
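
Once both models are trained, they can be loaded together and queried. This is a sketch, not part of the original snippet; the paths assume the defaults used above and the 'customernlu' name from train_nlu:

from rasa_core.interpreter import RasaNLUInterpreter
from rasa_core.agent import Agent

interpreter = RasaNLUInterpreter('./models/nlu/default/customernlu')
agent = Agent.load('./models/dialogue', interpreter=interpreter)
responses = agent.handle_text('I want to order a router')
for r in responses:
    print(r.get('text'))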

from tensorflow import keras
from tensorflow.keras import layers

def EmbeddingRec(EMBEDDING_SIZE, NUM_MOVIES, NUM_USERS, ROW_COUNT):
    # movie tower: embed the movie id and flatten it into a dense vector
    movie_input = keras.Input(shape=(1,), name='movie_id')
    movie_emb = layers.Embedding(output_dim=EMBEDDING_SIZE, input_dim=NUM_MOVIES, input_length=ROW_COUNT, name='movie_emb')(movie_input)
    movie_vec = layers.Flatten(name='FlattenMovie')(movie_emb)
    movie_model = keras.Model(inputs=movie_input, outputs=movie_vec)
    # user tower: embed the user id the same way
    user_input = keras.Input(shape=(1,), name='user_id')
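    # assumed continuation (the snippet is truncated here): the user tower
    # mirrors the movie tower above, and both sub-models are returned so the
    # learned embeddings can be looked up later
    user_emb = layers.Embedding(output_dim=EMBEDDING_SIZE, input_dim=NUM_USERS, input_length=ROW_COUNT, name='user_emb')(user_input)
    user_vec = layers.Flatten(name='FlattenUser')(user_emb)
    user_model = keras.Model(inputs=user_input, outputs=user_vec)
    return movie_model, user_model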

from sklearn.neighbors import KNeighborsClassifier

# let's assume a user with ID 200
TEST_USER_ID = 200
# get the embedding of this user from the trained user tower
user_embedding = user_model.predict([TEST_USER_ID]).reshape(1, -1)[0]
# fit a KNN model over the movie embeddings
clf = KNeighborsClassifier(n_neighbors=11)
clf.fit(MOVIE_EMBEDDING_LIST, knn_train_label)
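
With the classifier fitted on the movie embeddings, the movies nearest to this user's embedding can be retrieved with kneighbors. This query step is a sketch, not part of the original snippet; it assumes the rows of MOVIE_EMBEDDING_LIST line up with the movie labels in knn_train_label.

# find the 11 movie embeddings closest to the user's embedding
distances, indices = clf.kneighbors(user_embedding.reshape(1, -1), n_neighbors=11)
recommended_movies = [knn_train_label[i] for i in indices[0]]
print(recommended_movies)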

contraction_map = {"ain't": "is not", "aren't": "are not", "can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not",
                   "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not",
                   "he'd": "he would", "he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is",
                   "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have", "I'm": "I am", "I've": "I have", "i'd": "i would",
                   "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have", "i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would",
                   # ... the mapping continues with further contractions (truncated in this excerpt)
                   }

from __future__ import unicode_literals, print_function, division

import pandas as pd
import numpy as np
import json
import os, glob
from io import open
import unicodedata
import string
import re

'''
Reusable set of functions to convert a tuple of strings (pair) to tensors
Reference: https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html
'''
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def indexesFromSentence(lang, sentence):
    # look up the vocabulary index of every word in the sentence
    return [lang.word2index[word] for word in sentence.split(' ')]
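
The snippet is cut off here. Following the referenced PyTorch tutorial, the companion helpers that turn a sentence, and then a (source, target) pair, into tensors typically look like this; EOS_token and the lang objects are assumptions carried over from that tutorial.

EOS_token = 1  # assumed end-of-sentence index, as in the referenced tutorial

def tensorFromSentence(lang, sentence):
    # append the end-of-sentence marker and build a column tensor of word indices
    indexes = indexesFromSentence(lang, sentence)
    indexes.append(EOS_token)
    return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)

def tensorsFromPair(pair, input_lang, output_lang):
    # convert a (source, target) string pair into a pair of tensors
    return (tensorFromSentence(input_lang, pair[0]),
            tensorFromSentence(output_lang, pair[1]))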

from rouge_score import rouge_scorer

def read_input(filename='evaluation_input.txt'):
    input_pair = []
    # read the evaluation file and collect one entry per line
    with open(filename, encoding='utf-8') as f:
        for line in f:
            input_pair.append(line.strip())
    return input_pair
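
The imported rouge_scorer is then used to compare a generated text against its reference. A minimal usage sketch (the two strings below are hypothetical, just to show the API):

scorer = rouge_scorer.RougeScorer(['rouge1', 'rougeL'], use_stemmer=True)
scores = scorer.score('the cat sat on the mat', 'a cat was sitting on the mat')
print(scores['rouge1'].fmeasure, scores['rougeL'].fmeasure)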

import tensorflow as tf

class Encoder(tf.keras.Model):
    def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
        '''
        vocab_size: number of unique words in the vocabulary
        embedding_dim: dimension of the embedding output
        enc_units: number of units in the RNN cell
        batch_sz: number of examples passed to training in each batch
        '''
        super(Encoder, self).__init__()
        self.batch_sz = batch_sz
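        # assumed continuation (the snippet is truncated here): the usual
        # tf.keras encoder pattern of an Embedding layer feeding a GRU
        self.enc_units = enc_units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(enc_units, return_sequences=True, return_state=True)

    def call(self, x, hidden):
        # embed the token ids and run the GRU from the given initial hidden state
        x = self.embedding(x)
        output, state = self.gru(x, initial_state=hidden)
        return output, state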