A Detailed Guide to Implementing LDA in Python
LDA (Latent Dirichlet Allocation) is a widely used probabilistic topic model, generally implemented via variational inference or Gibbs sampling. Its authors released C source code for the variational-inference version when the model was first proposed (a C++ adaptation of that class will be posted separately). Here is an LDA class built on a third-party Python module.
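Before the full class, a minimal sketch of the third-party lda package on its bundled Reuters sample data may help clarify the fit / topic_word_ / doc_topic_ API used below; the parameter values here are illustrative only:

import lda
import lda.datasets

# document-term count matrix (documents x vocabulary), plus the vocabulary itself
X = lda.datasets.load_reuters()
vocab = lda.datasets.load_reuters_vocab()

# fit a 20-topic model with collapsed Gibbs sampling
model = lda.LDA(n_topics=20, n_iter=500, random_state=1)
model.fit(X)

# topic_word_ is topics x vocabulary; doc_topic_ is documents x topics
print(model.topic_word_.shape, model.doc_topic_.shape)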
# coding: utf-8
import numpy as np
import lda
import lda.datasets
import jieba
import codecs


class LDA_v20161130():
    def __init__(self, topics=2):
        self.n_topic = topics
        self.corpus = None
        self.vocab = None
        self.ppCountMatrix = None
        self.stop_words = [u',', u'。', u'、', u'(', u')', u'·', u'!', u' ', u':', u'“', u'”', u'\n']
        self.model = None
    def loadCorpusFromFile(self, fn):
        # segment the raw text with jieba (Chinese word segmentation)
        f = open(fn, 'r', encoding='utf-8')
        text = f.readlines()
        text = r' '.join(text)
        seg_generator = jieba.cut(text)
        seg_list = [i for i in seg_generator if i not in self.stop_words]
        seg_list = r' '.join(seg_list)
        # collect every distinct token into the vocabulary
        seglist = seg_list.split(" ")
        self.vocab = []
        for word in seglist:
            if word != u'' and word not in self.vocab:
                self.vocab.append(word)
        CountMatrix = []
        f.seek(0, 0)
        # build the term-frequency vector of each document (one document per line)
        for line in f:
            # reset the counts
            count = np.zeros(len(self.vocab), dtype=int)
            text = line.strip()
            # each line still has to be segmented first
            seg_generator = jieba.cut(text)
            seg_list = [i for i in seg_generator if i not in self.stop_words]
            seg_list = r' '.join(seg_list)
            seglist = seg_list.split(" ")
            # count how often each vocabulary word occurs in this document
            for word in seglist:
                if word in self.vocab:
                    count[self.vocab.index(word)] += 1
            CountMatrix.append(count)
        f.close()
        # self.ppCountMatrix = (len(CountMatrix), len(self.vocab))
        self.ppCountMatrix = np.array(CountMatrix)
        print("load corpus from %s success!" % fn)
    def setStopWords(self, word_list):
        self.stop_words = word_list

    def fitModel(self, n_iter=1500, _alpha=0.1, _eta=0.01):
        self.model = lda.LDA(n_topics=self.n_topic, n_iter=n_iter, alpha=_alpha, eta=_eta, random_state=1)
        self.model.fit(self.ppCountMatrix)
    def printTopic_Word(self, n_top_word=8):
        for i, topic_dist in enumerate(self.model.topic_word_):
            # take the n_top_word highest-probability words of each topic
            topic_words = np.array(self.vocab)[np.argsort(topic_dist)][:-(n_top_word + 1):-1]
            print("Topic:", i, "\t", end='')
            for word in topic_words:
                print(word, end=' ')
            print()

    def printDoc_Topic(self):
        for i in range(len(self.ppCountMatrix)):
            print("Doc %d: ((top topic: %s) topic distribution: %s)" % (i, self.model.doc_topic_[i].argmax(), self.model.doc_topic_[i]))

    def printVocabulary(self):
        print("vocabulary:")
        for word in self.vocab:
            print(word, end=' ')
        print()
    def saveVocabulary(self, fn):
        f = codecs.open(fn, 'w', 'utf-8')
        for word in self.vocab:
            f.write("%s\n" % word)
        f.close()

    def saveTopic_Words(self, fn, n_top_word=-1):
        if n_top_word == -1:
            n_top_word = len(self.vocab)
        f = codecs.open(fn, 'w', 'utf-8')
        for i, topic_dist in enumerate(self.model.topic_word_):
            topic_words = np.array(self.vocab)[np.argsort(topic_dist)][:-(n_top_word + 1):-1]
            f.write("Topic: %d\t" % i)
            for word in topic_words:
                f.write("%s " % word)
            f.write("\n")
        f.close()

    def saveDoc_Topic(self, fn):
        f = codecs.open(fn, 'w', 'utf-8')
        for i in range(len(self.ppCountMatrix)):
            f.write("Doc %d: ((top topic: %s) topic distribution: %s)\n" % (i, self.model.doc_topic_[i].argmax(), self.model.doc_topic_[i]))
        f.close()
A demo of the implementation:
For example, using crawled BBC news stories about Trump's election as the corpus, run the following code:
if __name__ == "__main__":
    _lda = LDA_v20161130(topics=20)
    stop = [u'!', u'@', u'#', u',', u'.', u'/', u';', u' ', u'[', u']', u'$', u'%', u'^', u'&', u'*', u'(', u')',
            u'"', u':', u'<', u'>', u'?', u'{', u'}', u'=', u'+', u'_', u'-', u"'"]
    _lda.setStopWords(stop)
    _lda.loadCorpusFromFile(u'C:\\Users\\Administrator\\Desktop\\BBC.txt')
    _lda.fitModel(n_iter=1500)
    _lda.printTopic_Word(n_top_word=10)
    _lda.printDoc_Topic()
    _lda.saveVocabulary(u'C:\\Users\\Administrator\\Desktop\\vocab.txt')
    _lda.saveTopic_Words(u'C:\\Users\\Administrator\\Desktop\\topic_word.txt')
    _lda.saveDoc_Topic(u'C:\\Users\\Administrator\\Desktop\\doc_topic.txt')
Since the corpus is entirely English, stop_words here contains only English punctuation; the model is set to 20 topics and 1500 iterations. The run covers 148 documents with a vocabulary of 1347 words and 4174 tokens in total, and takes about 17 s on an i3 machine.
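If you are unsure whether 1500 iterations are enough, one hedged check is to refit with more iterations and compare the model log likelihood that the lda package exposes (higher, i.e. less negative, is better); a minimal sketch:

_lda.fitModel(n_iter=1500)
ll_1500 = _lda.model.loglikelihood()
_lda.fitModel(n_iter=3000)
ll_3000 = _lda.model.loglikelihood()
# if the two values are close, the sampler has largely converged
print(ll_1500, ll_3000)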
Part of the topic-word output:
Topic: 0	to will and of he be trumps the what policy
Topic: 1	he would in said not no with mr this but
Topic: 2	for or can some whether have change health obamacare insurance
Topic: 3	the to that president as of us also first all
Topic: 4	trump to when with now were republican mr office presidential
Topic: 5	the his trump from uk who president to american house
Topic: 6	a to that was it by issue vote while marriage
Topic: 7	the to of an are they which by could from
Topic: 8	of the states one votes planned wont won new clinton
Topic: 9	in us a use for obama law entry new interview
Topic: 10	and on immigration has that there website vetting action given
Part of the document-topic output:
Doc 0: ((top topic: 4) topic distribution: [0.02972973 0.0027027 0.0027027 0.16486486 0.32702703 0.19189189
0.0027027 0.0027027 0.02972973 0.0027027 0.02972973 0.0027027
0.0027027 0.0027027 0.02972973 0.0027027 0.02972973 0.0027027
0.13783784 0.0027027])
Doc 1: ((top topic: 18) topic distribution: [0.21 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.11 0.01 0.01 0.01
0.01 0.01 0.01 0.01 0.01 0.01 0.31 0.21])
Doc 2: ((top topic: 18) topic distribution: [0.02075472 0.00188679 0.03962264 0.00188679 0.00188679 0.00188679
0.00188679 0.15283019 0.00188679 0.02075472 0.00188679 0.24716981
0.00188679 0.07735849 0.00188679 0.00188679 0.00188679 0.00188679
0.41698113 0.00188679])
Of course, for an English corpus most function words and other common, low-content words (it, this, there, that, ...) should also be excluded; in practice the stop list and parameters need to be chosen sensibly.
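For example, a sketch of how the stop list could be extended before loading the corpus; the word list below is a small, purely illustrative sample, not a complete stop-word set, and note that the class does not lowercase tokens, so capitalized variants may need their own entries:

english_stop = [u'the', u'to', u'of', u'and', u'a', u'an', u'in', u'he',
                u'it', u'this', u'there', u'that', u'was', u'by', u'for',
                u'with', u'be', u'are', u'they']
punctuation = [u'!', u'@', u'#', u',', u'.', u'/', u';', u':', u'?',
               u'(', u')', u'"', u"'", u' ']
_lda.setStopWords(punctuation + english_stop)
_lda.loadCorpusFromFile(u'C:\\Users\\Administrator\\Desktop\\BBC.txt')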
Trying a Chinese corpus instead: Xi Jinping's condolence message on the death of Fidel Castro, plus a news report on Park Geun-hye's resignation.
Topic: 0	的 同志 和 人民 卡斯特罗 菲德尔 古巴 他 了 我
Topic: 1	在 朴槿惠 向 表示 总统 对 将 的 月 国民
Doc 0: ((top topic: 0) topic distribution: [0.91714123 0.08285877])
Doc 1: ((top topic: 1) topic distribution: [0.09200666 0.90799334])
Function words such as 的, 和, 了 and 对 still leak through, but on the whole the topic distributions of the two news items are clearly separated, so the result is quite decent.
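One hedged fix is to load a Chinese stop-word list from a file, one word per line, and pass it to setStopWords(); the filename stopwords_zh.txt below is a hypothetical placeholder for whatever list you use:

import codecs
# read one stop word per line from a UTF-8 file (stopwords_zh.txt is
# a placeholder name), keeping the punctuation entries as well
with codecs.open('stopwords_zh.txt', 'r', 'utf-8') as f:
    zh_stop = [line.strip() for line in f if line.strip()]
_lda.setStopWords(zh_stop + [u'的', u'和', u'了', u'对'])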
That concludes this detailed guide to implementing LDA in Python; hopefully it proves useful.