Created
October 4, 2018 08:11
-
-
Save infinityfuture/9ea6d1548c88c40f61f0fbbea53307ab to your computer and use it in GitHub Desktop.
Use the TextRank algorithm to generate an extractive summary, using word2vec embeddings to compute sentence similarity
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
""" | |
Reference: | |
http://www.hankcs.com/nlp/textrank-algorithm-to-extract-the-keywords-java-implementation.html | |
http://www.hankcs.com/nlp/textrank-algorithm-java-implementation-of-automatic-abstract.html | |
Chinese Embedding From | |
https://github.com/Embedding/Chinese-Word-Vectors | |
""" | |
import gensim | |
import numpy as np | |
def get_sentence_embedding(sentence, model):
    """Return the mean word vector of all in-vocabulary words in *sentence*.

    Args:
        sentence: iterable of word tokens (pre-segmented).
        model: gensim KeyedVectors using the pre-4.0 API (exposes
            ``.vocab`` and supports ``model[word]`` lookup).

    Returns:
        np.ndarray of shape (model.vector_size,): the average embedding,
        or a zero vector when no token is in the vocabulary.

    Bug fixed: the original divided ``None`` by 0 (TypeError) whenever a
    sentence contained no in-vocabulary word.
    """
    vec_sum = None
    count = 0
    for word in sentence:
        if word in model.vocab:
            if vec_sum is None:
                # Copy so in-place += below never mutates the model's vector.
                vec_sum = np.copy(model[word])
            else:
                vec_sum += model[word]
            count += 1
    if count == 0:
        # No known word at all: fall back to a zero embedding instead of
        # crashing; downstream dot products then yield 0 similarity.
        return np.zeros(model.vector_size, dtype=np.float32)
    return vec_sum / count
def get_similarity_matrix(corpus, model):
    """Build the pairwise sentence-similarity matrix for *corpus*.

    Entry (i, j) is the raw inner product of the embeddings of sentences
    i and j. Note the vectors are not normalised, so this is a dot
    product, not cosine similarity.
    """
    rows = []
    for sent in corpus:
        rows.append(get_sentence_embedding(sent, model))
    embeddings = np.array(rows)
    return embeddings.dot(embeddings.T)
def new_ws(i, word_i, ws, corpus, similarity_matrix, d=0.85):
    """One TextRank update step for the weight of sentence *i*.

    Implements  WS(V_i) = (1 - d) + d * sum_{j != i} [ w_ji / sum_k(w_jk) ] * WS(V_j)

    Args:
        i: index of the sentence being updated.
        word_i: unused; kept for backward-compatible call signature.
        ws: current weight vector, one entry per sentence.
        corpus: unused; kept for backward-compatible call signature.
        similarity_matrix: square matrix of edge weights w[j][i].
        d: damping factor (0.85 as in the TextRank paper).

    Returns:
        float: the updated weight of sentence i.

    Bug fixed: the original computed ``size = len(weight)`` against a
    module-level global instead of the ``ws`` parameter, so the function
    failed (NameError) for any caller not defining that global.
    """
    size = len(ws)
    s = 0.0
    for j in range(size):
        if j == i:
            continue
        w_j_i = similarity_matrix[j][i]
        weight_sum_j = np.sum(similarity_matrix[j])
        # Skip zero-weight rows (e.g. an all-zero embedding) to avoid /0.
        if weight_sum_j == 0:
            continue
        s += d * w_j_i / weight_sum_j * ws[j]
    return (1 - d) + s
# --- Demo: rank the sentences of a small Chinese text by TextRank weight ---
# Each inner list is one pre-tokenised sentence (word segmentation already done).
corpus = [
    ["算法", "大致", "分", "基本", "算法", "数据", "结构", "算法", "数论", "算法", "计算", "几何", "算法", "图", "算法", "动态", "规划", "数值", "分析", "加密", "算法", "排序", "算法", "检索", "算法", "随机", "化", "算法", "并行", "算法", "厄", "米", "变形", "模型", "随机", "森林", "算法"],
    ["算法", "宽泛", "分为", "三类"],
    ["有限", "确定性", "算法"],
    ["类", "算法", "有限", "一段", "时间", "终止"],
    ["可能", "花", "长", "时间", "执行", "指定", "任务"],
    ["一定", "时间", "终止"],
    ["类", "算法", "得出", "常", "取决", "输入", "值"],
    ["二"],
    ["有限", "非", "确定", "算法"],
    ["类", "算法", "有限", "时间", "终止"],
    ["一个", "定", "数值"],
    ["算法", "唯一", "确定"],
    ["三"],
    ["无限", "算法"],
    ["没有", "定义", "终止", "定义", "条件"],
    ["定义", "条件", "无法", "输入", "数据", "满足", "终止", "运行", "算法"],
    ["通常"],
    ["无限", "算法", "产生", "未", "确定", "定义", "终止", "条件"]
]
# Pre-trained Chinese word vectors in text word2vec format; the file must
# exist in the working directory (see the Chinese-Word-Vectors repo cited
# in the module docstring). Loading takes a while and a lot of RAM.
model = gensim.models.KeyedVectors.load_word2vec_format(
    'sgns.baidubaike.bigram-char', binary=False)
similarity_matrix = get_similarity_matrix(corpus, model)
# Power iteration of the TextRank update, starting from uniform weights,
# until the squared L2 change between iterations falls below `tol`.
weight = np.ones(len(corpus))
max_iter = 200
tol = 1e-3
for i in range(max_iter):
    print(i)  # iteration progress
    # NOTE(review): the comprehension below rebinds `i`; in Python 3 the
    # comprehension has its own scope so the outer counter is unaffected,
    # but this code would misbehave under Python 2.
    new_weight = np.array([
        new_ws(i, word_i, weight, corpus, similarity_matrix)
        for i, word_i in enumerate(corpus)
    ])
    if np.sum((weight - new_weight) ** 2) < tol:
        break
    weight = new_weight
# Sentences paired with their final TextRank weight, highest (most
# representative of the text) first.
print(sorted(list(zip(corpus, weight)), key=lambda x: x[1], reverse=True))
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment