# jieba word segmentation and word-frequency counting mini-project

import pandas as pd
import jieba
import jieba.analyse
from collections import Counter,OrderedDict
jieba.load_userdict('./userdict.txt')  # 加載外部 用戶詞典


def stopwordslist(filepath):
    """Load a stop-word list from *filepath* (one word per line, UTF-8).

    Args:
        filepath: path to the stop-word file.

    Returns:
        list[str]: each line stripped of surrounding whitespace
        (blank lines become empty strings, matching the original behavior).
    """
    # Use a context manager so the file handle is deterministically closed;
    # the original left it open until garbage collection. Iterating the
    # file object directly also avoids the redundant readlines() list.
    with open(filepath, 'r', encoding='utf-8') as f:
        return [line.strip() for line in f]


def text_cut(text1):
    """Extract keywords from *text1* via jieba's TF-IDF and drop stop words.

    Args:
        text1: raw text string to segment.

    Returns:
        str: comma-joined keywords that are longer than one character and
        not in the stop-word list.
    """
    # A set gives O(1) membership tests for the stop-word filter
    # (the original tested membership against a list).
    # NOTE(review): the stop-word file is re-read on every call; if this is
    # applied over a large DataFrame, consider loading it once at module level.
    stopwords = set(stopwordslist('./stop_words.txt'))  # stop-word file path
    words = jieba.analyse.extract_tags(
        text1, topK=6, withWeight=False,
        allowPOS=('ns', 'n', 'vn', 'v', 'm', 'q'))
    kept = [w for w in words if len(w) > 1 and w not in stopwords]
    return ','.join(kept)


def cut_term():
    """Segment the '合併' column of Sheet3 and write the result to Excel.

    Reads './xxx.xlsx' (sheet 'Sheet3'), adds a 'term' column produced by
    text_cut, previews the first rows, and saves to './Q2-xxxx_new2.xlsx'.
    """
    frame = pd.read_excel('./xxx.xlsx', sheet_name='Sheet3')
    frame['term'] = frame['合併'].apply(text_cut)
    print(frame.head())
    frame.to_excel('./Q2-xxxx_new2.xlsx', index=False)


def make_count(data):
    """Count term frequencies per industry and write one Excel sheet each.

    Args:
        data: DataFrame with a '一xxx' column (industry label) and a 'term'
              column of comma-joined segmented words (as produced by text_cut).

    Side effects:
        Writes './Q2分行業分詞結果11.xlsx' with one sheet per industry, each
        containing the industry's words sorted by descending frequency.
    """
    writer = pd.ExcelWriter('./Q2分行業分詞結果11.xlsx', engine='xlsxwriter')
    # FIX: the original line was `all_industry = ['xxxx]` — an unterminated
    # string literal (SyntaxError). Fill in the real industry names here.
    all_industry = ['xxxx']
    try:
        for industry in all_industry:
            terms = data.loc[data['一xxx'] == industry, 'term'].tolist()
            words = []
            for t in terms:
                words.extend(t.split(','))
            print(len(words))
            # Counter.most_common() already yields (word, count) pairs sorted
            # by descending count — no need to sort a dict manually.
            counts = Counter(words).most_common()
            df = pd.DataFrame(counts, columns=[f'{industry}-詞', '頻率'])
            df.to_excel(writer, sheet_name=industry, index=False)
    finally:
        # Close the writer even if a sheet write fails (original leaked it).
        writer.close()


# Guard the script entry point so importing this module does not trigger
# file I/O (the original ran these statements unconditionally at import time).
if __name__ == '__main__':
    data = pd.read_excel('./xxxxxx.xlsx', sheet_name='Sheet1')
    make_count(data)