# Topic modeling on the 20 Newsgroups corpus with NMF and LDA.
# Requires scikit-learn >= 1.0 (get_feature_names_out, n_components, alpha_W).
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import NMF, LatentDirichletAllocation

def display_topics(model, feature_names, no_top_words):
    # Print the highest-weighted words for each topic in the fitted model
    for topic_idx, topic in enumerate(model.components_):
        print("Topic %d:" % topic_idx)
        print(" ".join([feature_names[i]
                        for i in topic.argsort()[:-no_top_words - 1:-1]]))

dataset = fetch_20newsgroups(shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))
documents = dataset.data

no_features = 1000

# NMF is able to use tf-idf
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=no_features, stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(documents)
tfidf_feature_names = tfidf_vectorizer.get_feature_names_out()

# LDA needs raw term counts because it is a probabilistic graphical model over word counts
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=no_features, stop_words='english')
tf = tf_vectorizer.fit_transform(documents)
tf_feature_names = tf_vectorizer.get_feature_names_out()

no_topics = 20

# Run NMF (alpha_W with the default alpha_H='same' regularizes both factors,
# matching the older single alpha parameter)
nmf = NMF(n_components=no_topics, random_state=1, alpha_W=0.1, l1_ratio=0.5, init='nndsvd').fit(tfidf)

# Run LDA
lda = LatentDirichletAllocation(n_components=no_topics, max_iter=5, learning_method='online',
                                learning_offset=50., random_state=0).fit(tf)

no_top_words = 10
display_topics(nmf, tfidf_feature_names, no_top_words)
display_topics(lda, tf_feature_names, no_top_words)
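
Both fitted models can also score individual documents against the learned topics via transform. A minimal sketch, assuming the variables nmf, lda, tfidf, and tf from the script above; the per-document printout is illustrative and not part of the original gist:

# Document-topic weights: rows are documents, columns are topics.
# NMF.transform returns unnormalized weights (the W factor);
# LatentDirichletAllocation.transform returns a probability distribution per document.
nmf_W = nmf.transform(tfidf)
lda_doc_topic = lda.transform(tf)

# Show the dominant topic for the first few documents under each model
for doc_idx in range(3):
    print("Document %d: NMF topic %d, LDA topic %d"
          % (doc_idx, nmf_W[doc_idx].argmax(), lda_doc_topic[doc_idx].argmax()))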