Get the most similar sentences by comparing them to an "important word"
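The script below embeds a target word with BERT by summing its last four hidden layers and averaging over the word's subword tokens. It then embeds every token of each candidate sentence the same way and ranks the sentences by the maximum token-level cosine similarity to the target word.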
from pprint import pprint

import datasets
import numpy as np
import torch
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModel

"""
Taken from https://discuss.huggingface.co/t/generate-raw-word-embeddings-using-transformer-models-like-bert-for-downstream-process/2958
"""
def get_word_idx(sent: str, word: str) -> int:
    """Return the whitespace-based index of `word` in `sent` (first occurrence only)."""
    return sent.split(" ").index(word)
def get_hidden_states(encoded, model, layers):
    """Push the encoded input through the model, then stack and sum the
    requested `layers` (the last four by default) to get one vector per token."""
    with torch.no_grad():
        output = model(**encoded)
    # `hidden_states` is a tuple with one (batch, seq_len, hidden_dim) tensor
    # per layer, plus the embedding layer output at index 0.
    states = output.hidden_states
    # Stack the requested layers, sum them token-wise, and squeeze the batch dim.
    output = torch.stack([states[i] for i in layers]).sum(0).squeeze()
    return output
def get_word_vector(sent, idx, tokenizer, model, layers):
    """Get a word vector by tokenizing the input sentence, finding all subword
    tokens that make up the word of interest, and averaging their hidden states."""
    encoded = tokenizer.encode_plus(sent, return_tensors="pt")
    # Get all token positions that belong to the word of interest
    # (a single word may be split into several subword tokens).
    token_ids_word = np.where(np.array(encoded.word_ids()) == idx)
    output = get_hidden_states(encoded, model, layers)
    # Only select the tokens that constitute the requested word
    word_tokens_output = output[token_ids_word]
    return word_tokens_output.mean(0)
def get_embeddings_for_sentence(sent, tokenizer, model, layers):
    """Return one embedding per token of `sent` (shape: seq_len x hidden_dim)."""
    encoded = tokenizer.encode_plus(sent, return_tensors="pt")
    return get_hidden_states(encoded, model, layers)
def main(layers=None):
    # Use the last four layers by default
    layers = [-4, -3, -2, -1] if layers is None else layers
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    model = AutoModel.from_pretrained("bert-base-uncased", output_hidden_states=True)

    ds = datasets.load_dataset("clinc_oos", "small")["validation"].shuffle(seed=2022)
    text = ds["text"][:1000]

    # Get the embedding for the word "upgrade" in context
    sent = "I want to upgrade my account ."
    idx = get_word_idx(sent, "upgrade")
    word_embedding = get_word_vector(sent, idx, tokenizer, model, layers)[np.newaxis]

    # Get per-token embeddings for all candidate sentences
    sentences_embedding = [get_embeddings_for_sentence(s, tokenizer, model, layers)
                           for s in tqdm(text, desc="Computing sentence embeddings")]

    # Score each sentence by the cosine similarity of its most similar token
    max_sim = [cosine_similarity(word_embedding, sentence).max() for sentence in sentences_embedding]
    top_5 = np.argsort(max_sim)[::-1][:5]
    print("Most similar sentences to the word 'upgrade'")
    pprint([text[i] for i in top_5])
if __name__ == '__main__':
    main()
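The helper functions are reusable beyond `main`. As a minimal sketch (assuming the functions and imports above are in scope; the sentences, the word "bank", and the variable names below are illustrative, not part of the gist), the same machinery can compare the contextual embedding of one word across two sentences:

layers = [-4, -3, -2, -1]
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained("bert-base-uncased", output_hidden_states=True)

# Two contexts for the same surface form; the embeddings should differ by sense.
sent_a = "I deposited cash at the bank ."
sent_b = "We had a picnic on the river bank ."
vec_a = get_word_vector(sent_a, get_word_idx(sent_a, "bank"), tokenizer, model, layers)
vec_b = get_word_vector(sent_b, get_word_idx(sent_b, "bank"), tokenizer, model, layers)

# Cosine similarity between the two contextual word vectors.
sim = cosine_similarity(vec_a[np.newaxis], vec_b[np.newaxis])[0, 0]
print(f"Similarity of 'bank' across the two contexts: {sim:.3f}")

Because BERT embeddings are contextual, the two vectors will not be identical, which is what makes the max-similarity search above sensitive to how a word is used, not just whether it appears.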