Python: text similarity analysis with gensim and jieba word segmentation

#!/usr/bin/env python
# -*- coding: utf-8 -*-


import sys
import pymongo
from pymongo import MongoClient
import jieba
import jieba.analyse
from gensim import corpora, models, similarities
from pprint import pprint  # pretty-printer

# Python 2 hack: force the default string encoding to UTF-8 so implicit
# str/unicode conversions do not raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf-8')

kickpath="" #"/root/python/"

dics=[]
dits={}
labels={}
count=1
mydoclist =[]
courses=[]
questions=[]
uuids=[]


# Tokenize the documents with jieba Chinese word segmentation.
def jieba_preprocess_cn(courses, low_freq_filter=True):
    #jieba.analyse.set_stop_words("../extra_dict/stop_words.txt")
    #jieba.analyse.set_idf_path("../extra_dict/idf.txt.big")
    texts_tokenized = []
    for document in courses:
        # Keep each document's top 500 TF-IDF keywords as its token list.
        tags = jieba.analyse.extract_tags(document, 500)
        texts_tokenized.append(tags)

    texts_filtered_stopwords = texts_tokenized
    pprint(texts_filtered_stopwords)

    # Remove punctuation tokens.
    english_punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%']
    texts_filtered = [[word for word in document if word not in english_punctuations] for document in texts_filtered_stopwords]

    # Drop very low-frequency words.
    if low_freq_filter:
        # Remove words that appear only once across the whole corpus.
        from collections import defaultdict
        frequency = defaultdict(int)
        for text in texts_filtered:
            for token in text:
                frequency[token] += 1
        texts = [[token for token in text if frequency[token] > 1] for text in texts_filtered]
    else:
        texts = texts_filtered
    pprint(texts)
    return texts
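
# Illustrative note (not in the original post): on invented sample data such as
#   jieba_preprocess_cn([u"高血壓患者的飲食建議", u"糖尿病患者的飲食建議"], low_freq_filter=False)
# each inner list of the result holds that document's most salient TF-IDF
# keywords, with punctuation tokens already removed.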

def train_by_lsi(lib_texts):
    # To see progress logs, enable logging:
    #import logging
    #logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

    dictionary = corpora.Dictionary(lib_texts)
    # doc2bow(): convert each token list into a bag of words, represented as
    # (word_id, word_frequency) 2-tuples.
    corpus = [dictionary.doc2bow(text) for text in lib_texts]
    tfidf = models.TfidfModel(corpus)
    corpus_tfidf = tfidf[corpus]

    # Off-the-cuff choice: train the LSI model (the intended num_topics=10 is left commented out).
    lsi = models.LsiModel(corpus_tfidf, id2word=dictionary)  #, num_topics=10)
    # index is a gensim.similarities.docsim.MatrixSimilarity instance.
    index = similarities.MatrixSimilarity(lsi[corpus])

    dictionary.save(kickpath+"kick.dict")
    lsi.save(kickpath+"kick.lsi")
    index.save(kickpath+"kick.index")
    return (index, dictionary, lsi)
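
# A minimal sketch (not in the original post) showing how the artifacts saved
# above could be queried for documents similar to a new text. The function name
# and the top_n parameter are illustrative; the load/lookup calls are gensim's
# standard API.
def query_similar(query_text, top_n=5):
    dictionary = corpora.Dictionary.load(kickpath + "kick.dict")
    lsi = models.LsiModel.load(kickpath + "kick.lsi")
    index = similarities.MatrixSimilarity.load(kickpath + "kick.index")
    # Tokenize the query the same way the corpus was preprocessed.
    tokens = jieba.analyse.extract_tags(query_text, 500)
    vec_lsi = lsi[dictionary.doc2bow(tokens)]  # project the query into LSI space
    sims = index[vec_lsi]                      # cosine similarity against every document
    # Return (document position, score) pairs for the top_n most similar documents.
    return sorted(enumerate(sims), key=lambda item: -item[1])[:top_n]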


if __name__ == '__main__':
    conn = MongoClient("xxx", 27017)
    db = conn.health
    db.authenticate("xx", "xxx")
    content = db.kickchufang.find({'doctorId': 'huanghuang'})
    index = 0
    for i in content:
        line = str(i['desc'])  #.decode("utf-8") #.encode("GB18030")
        #print "line:", line
        uuid = i['uuid']
        uuids.append(uuid)
        #print uuid, line
        courses.append(line)
        print str(index)
        index = index + 1
        #if (index > 10):
        #    break

    # Save the uuids so similarity results can be mapped back to MongoDB documents.
    man_file = open(kickpath + "kick.uuids", 'w')
    print >> man_file, uuids
    man_file.close()
    courses_name = courses

    # Corpus built -- this step may involve a lot of data, so it can be
    # preprocessed ahead of time and the results stored on disk.
    lib_texts = jieba_preprocess_cn(courses)
    (index, dictionary, lsi) = train_by_lsi(lib_texts)
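
    # Hypothetical follow-up (not in the original post): map the most similar
    # documents back to their MongoDB uuids via query_similar() defined above, e.g.:
    #   for pos, score in query_similar(courses[0]):
    #       print uuids[pos], score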