Created July 7, 2019 22:33
Further messing around with NLP and extracting topics, using PoS analysis
""" | |
Further messing around with NLP and extracting topics. | |
Built a class for analyzing text to determine topics. It allows for part of speech analysis and has a pipeline for | |
cleaning the data set. It then uses the Gensim LDA model to determine which words are the topics. | |
Using the Gensim part of speech analysis to filter out everything but nouns, in conjunction with my previous | |
lemmatization and stemming I was able to get results that seem much better even with still using only the SKLearn news | |
group data set. | |
""" | |
from __future__ import annotations
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from typing import AnyStr, Callable, List, Set, Tuple, Type, Union
import gensim
import nltk
import numpy as np
import os.path
import sklearn.datasets
import sys
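
# Rough sketch of what the cleaning pipeline below does to one sentence (illustrative
# output; the exact tags depend on the NLTK data that has been downloaded):
#   simple_preprocess("The running dogs barked")    -> ["the", "running", "dogs", "barked"]
#   keep only the NN/NNS/NNP/NNPS tagged tokens     -> ["dogs"]
#   WordNetLemmatizer().lemmatize("dogs")           -> "dog"
#   SnowballStemmer("english").stem("dog")          -> "dog"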
class TopicAnalyzer:
    def __init__(
        self,
        dictionary_fname: AnyStr,
        model_fname: AnyStr,
        regen: bool = False,
        seed: int = 400,
        stopwords: Union[Set, frozenset] = STOPWORDS,
        model_class: Type[gensim.models.LdaModel] = gensim.models.LdaMulticore,
        # The default tagger keeps every word; pass a callable that filters on the
        # tag (as the __main__ block below does) to keep only some parts of speech.
        part_of_speech: Callable = lambda tokens: (
            word for word, pos in nltk.pos_tag(tokens)
        ),
        verbose: bool = False,
    ):
        self._dictionary = None
        self._dictionary_fname = dictionary_fname
        self._model = None
        self._model_fname = model_fname
        self._preparation_pipeline = []
        self._prepared_documents = []
        self._regen = regen
        self._stopwords = set(stopwords)
        self._model_class = model_class
        self._dictionary_config = {}
        self._dictionary_extremes_config = {}
        self._model_config = {}
        self._part_of_speech = part_of_speech
        self._verbose = verbose
        np.random.seed(seed)
    @property
    def dictionary(self) -> gensim.corpora.Dictionary:
        if not self._dictionary:
            if self.can_use_existing_model():
                self._load_dictionary()
            else:
                self._generate()
        return self._dictionary

    @property
    def documents(self) -> List[List[AnyStr]]:
        return self._prepared_documents

    @property
    def model(self) -> gensim.models.ldamodel.LdaModel:
        if not self._model:
            if self.can_use_existing_model():
                self._load_model()
            else:
                self._generate()
        return self._model

    @property
    def regen(self) -> bool:
        return self._regen

    @property
    def stopwords(self) -> Set:
        return self._stopwords
    def add_document(self, *documents: AnyStr) -> TopicAnalyzer:
        """ Adds documents to the corpus that will be used to generate the dictionary and model. The documents are
        only stored if the dictionary and model might be generated. """
        if not self.can_use_existing_model():
            self._progress(f"Adding {len(documents)} Documents")
            for index, document in enumerate(documents):
                excerpt = document[:100].replace("\n", " ")
                self._progress(f"{index + 1:6}/{len(documents):6}: {excerpt:100} \r", "")
                self._prepared_documents.append(self._prepare_document(document))
            self._progress("")
        return self
    def add_prep_step(self, step: Callable) -> TopicAnalyzer:
        """ Adds a step to the document preparation pipeline that is used for cleaning documents before feeding them
        into the dictionary and model. """
        self._preparation_pipeline.append(step)
        return self

    def add_stop_words(self, *words: AnyStr) -> TopicAnalyzer:
        self._stopwords.update(words)
        return self
    def analyze_topics(self, document: AnyStr) -> List[Tuple[int, float]]:
        """ Returns (topic id, probability) pairs for the document, most probable topic first. """
        self._progress(f"Asked to analyze '{document[:25]}...'")
        bow_vector = self.dictionary.doc2bow(self._prepare_document(document))
        return sorted(self.model[bow_vector], key=lambda tup: tup[1], reverse=True)

    def configure_dictionary(self, **kwargs) -> TopicAnalyzer:
        self._dictionary_config = kwargs
        return self

    def configure_dictionary_extremes(self, **kwargs) -> TopicAnalyzer:
        self._dictionary_extremes_config = kwargs
        return self

    def configure_model(self, **kwargs) -> TopicAnalyzer:
        self._model_config = kwargs
        return self
    def can_use_existing_model(self) -> bool:
        """ The existing files can be used if the user hasn't requested that the model be regenerated and both the
        dictionary and model files exist. """
        return (
            not self.regen
            and os.path.exists(self._dictionary_fname)
            and os.path.exists(self._model_fname)
        )

    def prepared_documents(self) -> List[List[AnyStr]]:
        return self._prepared_documents
    def _generate(self):
        """ Generates and saves both the dictionary and the LDA model. """
        self._progress("Generating Dictionary")
        self._dictionary = self._generate_dictionary()
        self._progress(f"Saving Dictionary ({self._dictionary_fname})")
        with open(self._dictionary_fname, "wb") as dictionary_file:
            self._dictionary.save(dictionary_file)
        self._progress("Generating Model")
        self._model = self._generate_model()
        self._progress(f"Saving Model ({self._model_fname})")
        self._model.save(self._model_fname)
        self._regen = False

    def _generate_dictionary(self) -> gensim.corpora.Dictionary:
        dictionary = gensim.corpora.Dictionary(
            self.prepared_documents(), **self._dictionary_config
        )
        dictionary.filter_extremes(**self._dictionary_extremes_config)
        return dictionary

    def _generate_model(self) -> gensim.models.ldamodel.LdaModel:
        bow_corpus = [self._dictionary.doc2bow(doc) for doc in self.prepared_documents()]
        return self._model_class(bow_corpus, id2word=self._dictionary, **self._model_config)

    def _load_dictionary(self):
        self._dictionary = gensim.corpora.Dictionary.load(self._dictionary_fname)

    def _load_model(self):
        self._model = self._model_class.load(self._model_fname)
    def _prepare_document(self, document: AnyStr) -> List[AnyStr]:
        """ Tokenizes a document, runs the tokens through the part-of-speech filter, drops short words and stop
        words, then applies each step of the preparation pipeline. """
        prepared_words = []
        for word in self._part_of_speech(simple_preprocess(document)):
            if len(word) > 3 and word not in self.stopwords:
                for step in self._preparation_pipeline:
                    word = step(word)
                    if not word:
                        break
                if word:
                    prepared_words.append(word)
        return prepared_words

    def _progress(self, message: AnyStr, line_end: AnyStr = "\n"):
        if self._verbose:
            sys.stdout.write(f"{message}{line_end}")
            sys.stdout.flush()
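
# Minimal usage sketch (hypothetical file names; the real pipeline is in the
# __main__ block below):
#
#   analyzer = TopicAnalyzer("my-dictionary.txt", "my-lda-model.txt", verbose=True)
#   analyzer.add_document("Some text...", "More text...")
#   analyzer.configure_model(num_topics=4, passes=5, workers=2)
#   print(analyzer.analyze_topics("Text whose topics we want..."))
#
# On a later run with the same file names can_use_existing_model() returns True,
# so the saved dictionary and model are loaded instead of being regenerated.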
if __name__ == "__main__":
    nltk.download("wordnet")
    nltk.download("averaged_perceptron_tagger")

    stemmer = SnowballStemmer("english")
    lemmatizer = WordNetLemmatizer()

    newsgroups_train = sklearn.datasets.fetch_20newsgroups(subset="train", shuffle=True)

    analyzer = TopicAnalyzer(
        "dictionary.txt",
        "lda-model.txt",
        # regen=True,
        part_of_speech=lambda tokens: (
            word
            for word, pos in nltk.pos_tag(tokens)
            if pos in {"NN", "NNS", "NNP", "NNPS"}
        ),
        verbose=True,
    )
    analyzer.add_document(*newsgroups_train.data)
    analyzer.add_prep_step(
        lambda word: lemmatizer.lemmatize(word)
    ).add_prep_step(lambda word: stemmer.stem(word))
    analyzer.configure_dictionary_extremes(no_below=15, no_above=0.1, keep_n=100000)
    analyzer.configure_model(num_topics=8, passes=10, workers=2)
    documents = [(
        "The knockout stage of the 2019 Cricket World Cup will see "
        "two semi-finals, with the winners of each progressing to "
        "the final at Lord's. The first semi-final will be held at "
        "Old Trafford in Manchester and the second semi-final will "
        "be held at Edgbaston in Birmingham just as they did back "
        "in 1999, with all of the knockout games having a reserve "
        "day. It will be the third time Edgbaston has hosted a "
        "World Cup semi-final and the fourth semi-final to be held "
        "at Old Trafford - a record for a World Cup venue. The "
        "final will be held at Lord's in London for a record fifth "
        "time.\n\nOn 25 June 2019, Australia became the first team "
        "to qualify for the semi-finals, after beating England at "
        "Lord's. India became the second team to qualify for the "
        "semi-finals, after they defeated Bangladesh at Edgbaston "
        "on 2 July 2019. The following day saw tournament hosts "
        "England become the third team to qualify for the "
        "semi-finals, after they beat New Zealand at the Riverside "
        "Ground. New Zealand were the fourth and final team to "
        "qualify for the semi-finals, after Pakistan were unable to "
        "increase their net run rate sufficiently enough in their "
        "match against Bangladesh at Lord's."
    ), "Four score and seven years ago our fathers brought forth on this continent, a new nation, conceived in Liberty, and dedicated to the proposition that all men are created equal. Now we are engaged in a great civil war, testing whether that nation, or any nation so conceived and dedicated, can long endure.",
        "The unanimous Declaration of the thirteen united States of America, When in the Course of human events, it becomes necessary for one people to dissolve the political bands which have connected them with another, and to assume among the powers of the earth, the separate and equal station to which the Laws of Nature and of Nature's God entitle them, a decent respect to the opinions of mankind requires that they should declare the causes which impel them to the separation. We hold these truths to be self-evident, that all men are created equal, that they are endowed by their Creator with certain unalienable Rights, that among these are Life, Liberty and the pursuit of Happiness.--That to secure these rights, Governments are instituted among Men, deriving their just powers from the consent of the governed, --That whenever any Form of Government becomes destructive of these ends, it is the Right of the People to alter or to abolish it, and to institute new Government, laying its foundation on such principles and organizing its powers in such form, as to them shall seem most likely to effect their Safety and Happiness. Prudence, indeed, will dictate that Governments long established should not be changed for light and transient causes; and accordingly all experience hath shewn, that mankind are more disposed to suffer, while evils are sufferable, than to right themselves by abolishing the forms to which they are accustomed. But when a long train of abuses and usurpations, pursuing invariably the same Object evinces a design to reduce them under absolute Despotism, it is their right, it is their duty, to throw off such Government, and to provide new Guards for their future security.--Such has been the patient sufferance of these Colonies; and such is now the necessity which constrains them to alter their former Systems of Government. The history of the present King of Great Britain is a history of repeated injuries and usurpations, all having in direct object the establishment of an absolute Tyranny over these States. To prove this, let Facts be submitted to a candid world.",
        "Popular part of speech taggers (NLTK POS tagger, Stanford POS tagger) often make mistakes in the CV’s phrases tagging task. The reason is that often a CV text neglects grammar in order to highlight experience and to give it some structure (people start sentences with a predicate, not with a subject, sometimes phrases miss appropriate grammatical structure), a lot of words are specific terms or names. We had to write our own POS tagger solving the aforementioned problems. The classification is performed with a Keras neural network with three input layers each designed to take special class of data. The first input layer takes a variable length vector comprised of the described above features of the candidate phrases which could have arbitrary number of words. This feature vector is processed with an LSTM layer.",
        "One day you decided to create a navigation app for casual travelers. The app was centered around a beautiful map which helped users quickly orient themselves in any city. One of the most requested features for the app was automatic route planning. A user should be able to enter an address and see the fastest route to that destination displayed on the map. The first version of the app could only build the routes over roads. People who traveled by car were bursting with joy. But apparently, not everybody likes to drive on their vacation. So with the next update, you added an option to build walking routes. Right after that, you added another option to let people use public transport in their routes. However, that was only the beginning. Later you planned to add route building for cyclists. And even later, another option for building routes through all of a city’s tourist attractions.",
        '"Hi- yi ! You\'re up a stump, ain\'t you!" No answer. Tom surveyed his last touch with the eye of an artist, then he gave his brush another gentle sweep and surveyed the result, as before. Ben ranged up alongside of him. Tom\'s mouth watered for the apple, but he stuck to his work. Ben said: "Hello, old chap, you got to work, hey?" Tom wheeled suddenly and said: "Why, it\'s you, Ben! I warn\'t noticing." "Say -- I\'m going in a-swimming, I am. Don\'t you wish you could? But of course you\'d druther work -- wouldn\'t you? Course you would!" Tom contemplated the boy a bit, and said: "What do you call work?" "Why, ain\'t that work?"',
        "Our job is to love others without stopping to inquire whether or not they are worthy.",
        "You never know which thing you do is going to turn out to be important."]
    for document in documents:
        topics = analyzer.analyze_topics(document)
        print(f"\n--- --- Analyze --- ---\n\n{document}\n\n--- --- Analysis --- ---\n")
        for index, score in topics:
            print(f"{score:.4f}: {analyzer.model.show_topic(index, 5)}")
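
    # To inspect every learned topic rather than only the matches above, gensim's
    # LdaModel.print_topics could be used (a sketch, not part of the original output):
    #
    #   for topic_id, words in analyzer.model.print_topics(num_topics=8, num_words=5):
    #       print(f"Topic {topic_id}: {words}")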
boto==2.49.0
boto3==1.9.183
botocore==1.12.183
certifi==2019.6.16
chardet==3.0.4
docutils==0.14
gensim==3.7.3
idna==2.8
jmespath==0.9.4
joblib==0.13.2
nltk==3.4.4
numpy==1.16.4
pandas==0.24.2
python-dateutil==2.8.0
pytz==2019.1
requests==2.22.0
s3transfer==0.2.1
scikit-learn==0.21.2
scipy==1.3.0
six==1.12.0
sklearn==0.0
smart-open==1.8.4
urllib3==1.25.3