Chinese Word Frequency Statistics and Word Cloud Generation

This assignment comes from: https://edu.cnblogs.com/campus/gzcc/GZCC-16SE1/homework/2822

Chinese Word Frequency Statistics

1. Download a full-length Chinese novel.
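A minimal sketch of this step, assuming the novel is available as a single plain-text file (the URL below is a placeholder; the script in step 3 instead scrapes the novel chapter by chapter):

import requests

# Placeholder URL; substitute the real location of the novel's text file.
NOVEL_URL = "https://example.com/novel.txt"

resp = requests.get(NOVEL_URL)
resp.encoding = 'utf-8'  # make sure the Chinese text decodes correctly
with open('Ctxt.txt', 'w', encoding='utf-8') as f:
    f.write(resp.text)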

 

2. Read the text to be analyzed from a file.
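Once the novel is saved (as Ctxt.txt here), the word-frequency statistics themselves take only a few lines with jieba and collections.Counter; a minimal sketch:

import jieba
from collections import Counter

with open('Ctxt.txt', 'r', encoding='utf-8') as f:
    text = f.read()

words = jieba.lcut(text)  # segment the Chinese text into words
counts = Counter(w for w in words if len(w) > 1)  # ignore single-character tokens
for word, freq in counts.most_common(20):  # 20 most frequent words
    print(word, freq)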

 

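The script below handles a related preparatory task: it parses Sogou Pinyin .scel cell dictionaries (a binary format with fixed offsets for the pinyin and word tables) and dumps each lexicon's words to a plain-text file, which can then be loaded into jieba as a user dictionary in step 4.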
# -*- coding: utf-8 -*-
import struct
import os

# Offset of the pinyin table
startPy = 0x1540

# Offset of the Chinese word table
startChinese = 0x2628

# Global pinyin table
GPy_Table = {}

# Parse result:
# a list of tuples (frequency, pinyin, Chinese word)
 
 
# Convert raw bytes (UTF-16-LE code units) to a string
def byte2str(data):
    pos = 0
    ret = ''
    while pos < len(data):
        c = chr(struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0])
        if c != chr(0):
            ret += c
        pos += 2
    return ret
 
# Parse the pinyin table
def getPyTable(data):
    data = data[4:]
    pos = 0
    while pos < len(data):
        index = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
        pos += 2
        lenPy = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
        pos += 2
        py = byte2str(data[pos:pos + lenPy])

        GPy_Table[index] = py
        pos += lenPy
 
# Get the pinyin of one word
def getWordPy(data):
    pos = 0
    ret = ''
    while pos < len(data):
        index = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
        ret += GPy_Table[index]
        pos += 2
    return ret
 
# Parse the Chinese word table
def getChinese(data):
    GTable = []
    pos = 0
    while pos < len(data):
        # Number of homophones
        same = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]

        # Length of the pinyin index table
        pos += 2
        py_table_len = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]

        # Pinyin index table
        pos += 2
        py = getWordPy(data[pos: pos + py_table_len])

        # Chinese words
        pos += py_table_len
        for i in range(same):
            # Length of the Chinese word
            c_len = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
            # The Chinese word itself
            pos += 2
            word = byte2str(data[pos: pos + c_len])
            # Length of the extension data
            pos += c_len
            ext_len = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
            # Word frequency
            pos += 2
            count = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]

            # Save the result
            GTable.append((count, py, word))

            # Skip to the offset of the next word
            pos += ext_len
    return GTable
 
 
def scel2txt(file_name):
    print('-' * 60)
    with open(file_name, 'rb') as f:
        data = f.read()

    print("Lexicon name:", byte2str(data[0x130:0x338]))
    print("Lexicon type:", byte2str(data[0x338:0x540]))
    print("Description:", byte2str(data[0x540:0xd40]))
    print("Examples:", byte2str(data[0xd40:startPy]))

    getPyTable(data[startPy:startChinese])
    return getChinese(data[startChinese:])
 
if __name__ == '__main__':
    # Folder containing the .scel files
    in_path = r"F:\text"   # change to the folder holding your lexicon files
    # Output folder for the converted dictionaries
    out_path = r"F:\text"  # change to the folder for the converted files
    fin = [fname for fname in os.listdir(in_path) if fname.endswith(".scel")]
    for f in fin:
        try:
            file_path = os.path.join(out_path, f.rsplit('.', 1)[0] + '.txt')
            # Save the results, one word per line
            with open(file_path, 'a+', encoding='utf-8') as file:
                for word in scel2txt(os.path.join(in_path, f)):
                    file.write(word[2] + '\n')
            # Delete the source .scel file after a successful conversion
            os.remove(os.path.join(in_path, f))
        except Exception as e:
            print(e)
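Each output file holds one word per line, which is a format jieba.load_userdict accepts (an optional frequency and part-of-speech tag may follow each word), so the converted lexicons can be used directly in step 4.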

3. Generate the word cloud.

 
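The script below crawls the novel (the Chinese translation of The Kite Runner, judging by the output filename) chapter by chapter from t.icesmall.cn, saves the text to Ctxt.txt, segments it with jieba, filters out stop words, prints the 20 most frequent words, and renders a word cloud.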

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
import re
import jieba
from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt

def get_txt_from_net():
    c_str = []
    ua = UserAgent()
    headers = {'User-Agent': ua.random}  # randomize the User-Agent header
    for i in range(1, 18):  # chapters 1-17
        url = "http://t.icesmall.cn/book/53/826/" + str(i) + ".html"
        html = requests.get(url, headers=headers)
        html.encoding = 'utf-8'
        soup = BeautifulSoup(html.text, 'lxml')
        s = soup.find('div', id="Content").get_text()
        s = re.sub(r'p\{.*?\}', '', s).strip()  # drop inline-CSS residue and surrounding whitespace
        c_str.append(s)
    c_txt = ''.join(c_str)
    with open('Ctxt.txt', 'w', encoding='utf-8') as f:
        f.write(c_txt)

def get_word_from_txt():
    with open('Ctxt.txt', 'r', encoding='utf-8') as f:
        ctxt = f.read()
    jieba.load_userdict('people.txt')  # user dictionary, e.g. character names
    stxt = jieba.lcut(ctxt)
    with open('停用詞表.txt', 'r', encoding='utf-8') as f:  # stop-word list
        stops = f.read().split()
    tokens = [token for token in stxt if token not in stops]
    tokenstr = " ".join(tokens)
    # NOTE: for Chinese text, pass font_path pointing at a Chinese font
    # (e.g. font_path='simhei.ttf'), otherwise the cloud renders empty boxes.
    ciyun = WordCloud(background_color='#36f', width=400, height=300, margin=1).generate(tokenstr)
    # Count word frequencies, ignoring single-character tokens
    stxtcount = Counter(token for token in tokens if len(token) > 1)
    for item in stxtcount.most_common(20):  # 20 most frequent words
        print(item)
    plt.imshow(ciyun)
    plt.axis("off")
    plt.show()
    ciyun.to_file(r'The_Kite_Runner.jpg')

if __name__ == '__main__':
    get_txt_from_net()
    get_word_from_txt()
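Running the script writes the crawled text to Ctxt.txt, prints the 20 most frequent multi-character words, displays the cloud in a matplotlib window, and saves it to The_Kite_Runner.jpg. Note the font caveat in the comment above: without font_path, WordCloud's default font cannot render CJK characters.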

4. Update the dictionary: add the specialist vocabulary of the text being analyzed.
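A minimal sketch of this step, assuming the .scel converter above produced a word list such as people.txt (the filename the word-cloud script already loads):

import jieba

# Load a converted lexicon as a user dictionary: one word per line,
# optionally followed by a frequency and a part-of-speech tag.
jieba.load_userdict('people.txt')

# Individual domain terms can also be added one at a time.
jieba.add_word('阿米爾')  # hypothetical example: a character name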
