Using Gensim and heapq for scalable pairwise document comparisons.
'''
pairwise.py

This script uses the Python Gensim library and heapq from the standard library to make
fast, scalable pairwise comparisons between an arbitrarily large number of documents
using TF-IDF and cosine distance.

The script first generates a similarity matrix between all documents in a set, then uses
heapq to retrieve the top K most similar matches to each document in that set. It has been
tested on sets as large as 400,000 documents on a MacBook Air.

Resources:
Gensim: http://radimrehurek.com/gensim/
Gensim tutorials: http://radimrehurek.com/gensim/tutorial.html
Super helpful slideshare: http://www.slideshare.net/sandinmyjoints/an-introduction-to-gensim-topic-modelling-for-humans

Requirements:
- numpy (pip install numpy)
- scipy (pip install scipy)
- gensim (pip install gensim)
'''
import heapq

from gensim import corpora, models
from gensim.similarities.docsim import Similarity

########## CLASSES ##########

class DocCorpus(object):
    '''
    A streaming corpus that yields one bag-of-words vector per document, so the full
    document set never has to be held in memory. This is the iterator from the tutorial
    with a couple of modifications for cleanliness:
    http://radimrehurek.com/gensim/tut1.html#corpus-streaming-one-document-at-a-time
    '''
    def __init__(self, texts, dictionary):
        self.texts = texts
        self.dictionary = dictionary

    def __iter__(self):
        for line in self.texts:
            yield self.dictionary.doc2bow(line.lower().split())
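
# For reference, each item a DocCorpus yields is a sparse bag-of-words vector: a list
# of (token_id, count) tuples, e.g. [(0, 1), (3, 2)]. This is the input format the
# Gensim model and similarity index below both expect.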

########## MAIN ##########

if __name__ == '__main__':

    # First get documents to test (in this case from the Gensim tutorial)
    documents = ["Human machine interface for lab abc computer applications",
                 "A survey of user opinion of computer system response time",
                 "The EPS user interface management system",
                 "System and human system engineering testing of EPS",
                 "Relation of user perceived response time to error measurement",
                 "The generation of random binary unordered trees",
                 "The intersection graph of paths in trees",
                 "Graph minors IV Widths of trees and well quasi ordering",
                 "Graph minors A survey"]
    # Load into gensim dictionary object
    dictionary = corpora.Dictionary(line.lower().split() for line in documents)

    # Filtering, stopword removal and other cleaning happen here. In this case, we're only
    # removing words that occur just once, but there's a lot more that could be done.
    once_ids = [tokenid for tokenid, docfreq in dictionary.dfs.items() if docfreq == 1]
    dictionary.filter_tokens(once_ids)

    # Remove the gaps in token ids left by filtering
    dictionary.compactify()
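
    # Worth noting: for larger corpora, dictionary.filter_extremes(no_below=..., no_above=...)
    # performs this kind of pruning in a single call, dropping both very rare and very
    # common tokens at once.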
    # Now create the streaming corpus and train the TF-IDF model on it
    dc = DocCorpus(documents, dictionary)
    tfidf = models.TfidfModel(dc)
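
    # Optional sanity check (a sketch; the query string here is arbitrary): applying the
    # model to a single bag-of-words vector shows the weighting in action. The output is
    # a sparse list of (token_id, tfidf_weight) tuples; words not in the dictionary are
    # simply ignored.
    example_bow = dictionary.doc2bow("human computer interaction".lower().split())
    print(tfidf[example_bow])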
    # Create and iterate over the similarity matrix. With a document set of size N, this
    # matrix is N x N, which is normally too large to hold in memory for any N greater than
    # several tens of thousands. Gensim gets around this by splitting the index into shards
    # that are stored on disk and streamed back one row at a time.
    # num_features is the size of the vocabulary the vectors are drawn from.
    index = Similarity(corpus=tfidf[dc], num_features=len(dictionary), output_prefix="shard")
    for i in index:
        # Using nlargest from heapq changes complexity from the O(n * log(n)) of the sorted
        # function to O(n * log(k)), which, assuming we only want a small k, is MUCH faster
        # on huge arrays.
        print(heapq.nlargest(5, enumerate(i), key=lambda x: x[1]))  # Output format: (document index, score)
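
    # An alternative sketch (same inputs as above; the output_prefix "shard_topk" is an
    # assumed name to keep the two sets of on-disk shards separate): Similarity also
    # accepts a num_best argument, in which case iterating over the index yields only the
    # top-K (document index, score) pairs per row, already sorted by score, so the heapq
    # step isn't needed at all.
    topk_index = Similarity(corpus=tfidf[dc], num_features=len(dictionary),
                            output_prefix="shard_topk", num_best=5)
    for matches in topk_index:
        print(matches)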