Data: this article uses the Sogou news corpus, available at http://www.sogou.com/labs/resource/cs.php
First, read the downloaded txt files one by one and use regular expressions to extract the URL (the news category) and the content (the news body).
The code is as follows:
SamplesGen.py
# -*- coding: utf-8 -*-
'''
This script converts the Sogou news corpus into txt files,
using the URL as the category name and the <content> field
as the stored text.
'''
import re
from common import listdir

# content shorter than this many characters will not be saved
threh = 30

# collect all corpus files
list_name = []
listdir('SogouCS.reduced/', list_name)

# process each corpus file
for path in list_name:
    print(path)
    text = open(path, 'rb').read().decode('utf8')

    # regex-match the url and content fields
    patternURL = re.compile(r'<url>(.*?)</url>', re.S)
    patternCtt = re.compile(r'<content>(.*?)</content>', re.S)
    classes = patternURL.findall(text)
    contents = patternCtt.findall(text)

    # filter out every document whose content is shorter than 30 characters
    # (iterate in reverse so pop() does not shift the remaining indices)
    for i in reversed(range(len(contents))):
        if len(contents[i]) < threh:
            contents.pop(i)
            classes.pop(i)

    # reduce each URL to its first-level domain and use it as the category
    patternClass = re.compile(r'http://(.*?)/', re.S)
    for i in range(len(classes)):
        classi = patternClass.findall(classes[i])
        classes[i] = classi[0]

    # append each document to samples/<category>.txt, using the URL as the category
    for i in range(len(classes)):
        file_name = 'samples/' + classes[i] + '.txt'
        with open(file_name, 'a+', encoding='utf-8') as f:
            f.write(contents[i] + '\n')  # newline-separate the documents
1. Content shorter than 30 characters is not saved; such entries are deleted with pop() inside a reversed for loop.
2. Each text is appended to a file in the samples folder named after its URL-derived category.
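One practical note: open(..., 'a+') creates the per-category txt files but not the samples/ folder itself, so a small setup step like the following (a sketch, assuming the script is run from the project root) avoids a FileNotFoundError on the first write:

import os

# create the output folder SamplesGen.py appends to, if it does not exist yet
os.makedirs('samples', exist_ok=True)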
The classification pipeline then proceeds as follows:
1. Read the categorized data back from the samples folder.
2. Word segmentation. Chinese segmentation is handled with jieba: it first makes a rough split with regular expressions, then scans at high speed against a trie-based prefix dictionary, builds a directed acyclic graph (DAG) for every sentence, uses dynamic programming to find the maximum-probability path, i.e. the best segmentation according to word frequency, and finally handles out-of-vocabulary words (words not in its dictionary) with an HMM model decoded by the Viterbi algorithm (see the jieba sketch after this list).
3. Remove stop words. Build a stop-word list by hand and drop the words that carry no meaning.
4. Count word frequencies and build the bag-of-words. After steps 1-3, converting a document into a word vector only takes a call to gensim's dictionary.doc2bow() method. Note that what is stored is a sparse representation (a gensim sketch covering steps 4 and 5 follows this list). The format is:
A single term: (5, 2)
5 means the word has id 5 in the dictionary, and 2 means it appears twice in this document.
A whole document: [(5, 2), (3, 1)]
Word 5 appears twice and word 3 once in this document.
5. Generate the TF-IDF matrix. A word's importance is estimated from its frequency in the current document and its frequency across the whole corpus: the more often a word occurs in this document, the more important it is, but if it occurs often in every document, it becomes less important.
6. Feed the result into model training. Count the number of samples in each class, split the samples into a training set and a test set, and train the model with gradient descent. The result is:
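As a minimal sketch of the segmentation described in step 2, the call used throughout this pipeline is jieba.cut() in precise mode; the sentence below is made up purely for illustration:

import jieba

# a made-up sentence, only to show the API
sentence = '搜狗新聞語料可以用來訓練文本分類模型'

# precise mode (cut_all=False): jieba builds a DAG over the sentence, picks the
# maximum-probability path by dynamic programming, and falls back to an
# HMM + Viterbi decode for words that are not in its dictionary
word_list = list(jieba.cut(sentence, cut_all=False))
print(word_list)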
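And a short self-contained sketch of steps 4 and 5, assuming two toy token lists in place of real segmented news articles, showing how gensim produces the (word_id, count) pairs described above and then the TF-IDF weights:

from gensim import corpora, models

# two toy tokenised documents (stand-ins for segmented news texts)
docs = [['股票', '上漲', '股票'],
        ['球隊', '比賽', '上漲']]

dictionary = corpora.Dictionary(docs)            # maps each word to an integer id
bow = [dictionary.doc2bow(doc) for doc in docs]  # lists of (word_id, count) pairs
print(bow)

tfidf_model = models.TfidfModel(bow, dictionary=dictionary)
corpus_tfidf = [tfidf_model[doc] for doc in bow]  # lists of (word_id, tf-idf weight) pairs
print(corpus_tfidf)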
The code is:
common.py (helper-function script)
import os

def listdir(path, list_name):
    '''Recursively build the list of files under the raw-corpus folder.'''
    for file in os.listdir(path):
        file_path = os.path.join(path, file)   # join directory and file/dir name
        if os.path.isdir(file_path):           # directory: recurse into it
            listdir(file_path, list_name)
        else:                                  # file: record its path
            list_name.append(file_path)

def get_stop_words():
    '''Load the stop-word list from the stop_words file into a set.'''
    path = "stop_words"
    file = open(path, 'rb').read().decode('utf-8').split('\r\n')
    return set(file)

def rm_stop_words(word_list):
    '''Remove stop words and pure digits from a token list.'''
    word_list = list(word_list)
    stop_words = get_stop_words()
    # important: iterate in reverse, because the list shrinks after every pop
    for i in reversed(range(len(word_list))):
        if word_list[i] in stop_words:      # drop stop words
            word_list.pop(i)
        elif word_list[i].isdigit():        # drop numbers
            word_list.pop(i)
    return word_list

def rm_word_freq_so_little(dictionary, freq_thred):
    '''Drop from the gensim dictionary every token whose document frequency is below freq_thred.'''
    small_freq_ids = [tokenid for tokenid, docfreq in dictionary.dfs.items() if docfreq < freq_thred]
    dictionary.filter_tokens(small_freq_ids)
    dictionary.compactify()
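A quick usage sketch of these helpers (it assumes a plain-text stop_words file, split on \r\n as get_stop_words() expects, sits next to the scripts, and the token list below is made up):

from common import listdir, rm_stop_words

files = []
listdir('samples/', files)    # recursively collect every file path under samples/
print(files[:3])

tokens = ['今天', '的', '股票', '上漲', '了', '100']
print(rm_stop_words(tokens))  # stop words and pure digits are dropped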
textClsfy.py
'''
This script reads the corpus files, trains the LR model, and (optionally)
saves the dictionary, the corpus and the trained model along the way.
'''
import numpy as np
from sklearn.linear_model import LogisticRegression
from gensim import corpora, models, similarities
import jieba
from sklearn.model_selection import train_test_split
import pickle
from scipy.sparse import csr_matrix
from sklearn.metrics import classification_report
from common import listdir, rm_stop_words, rm_word_freq_so_little

if __name__ == '__main__':
    # a word that occurs fewer times than this threshold across the whole corpus
    # should not be kept in the dictionary
    freq_thred = 10

    # dictionary
    dictionary = corpora.Dictionary()
    # bag-of-words corpus
    bow = []

    labels_count = []

    list_name = []
    listdir('samples/', list_name)

    count = 0
    for path in list_name[0:2]:
        print(path)
        file = open(path, 'rb').read().decode('utf-8').split('\n')
        class_count = 0
        for text in file:
            # label the sample
            class_count = class_count + 1
            content = text
            # word segmentation
            word_list = list(jieba.cut(content, cut_all=False))
            # stop-word removal
            word_list = rm_stop_words(word_list)

            dictionary.add_documents([word_list])
            '''
            Convert to bag-of-words.
            The gensim dictionary is essentially a map; doc2bow adds unseen words to it,
            and for known words it puts the word id into word_bow together with the
            number of times that word occurs in this text.
            '''
            word_bow = dictionary.doc2bow(word_list)
            bow.append(word_bow)

        labels_count.append(class_count - 1)

    # with open('dictionary.pkl', 'wb') as f1:
    #     pickle.dump(dictionary, f1)

    # remove words that occur too rarely (ps: this may cause a dimension mismatch)
    rm_word_freq_so_little(dictionary, freq_thred)

    # dictionary.save('dicsave.dict')
    # corpora.MmCorpus.serialize('bowsave.mm', bow)

    tfidf_model = models.TfidfModel(corpus=bow, dictionary=dictionary)

    # with open('tfidf_model.pkl', 'wb') as f2:
    #     pickle.dump(tfidf_model, f2)

    '''apply the tf-idf model'''
    corpus_tfidf = [tfidf_model[doc] for doc in bow]

    '''convert the gensim sparse format into a matrix scikit-learn models accept'''
    data = []
    rows = []
    cols = []
    line_count = 0
    for line in corpus_tfidf:
        for elem in line:
            rows.append(line_count)
            cols.append(elem[0])
            data.append(elem[1])
        line_count += 1
    print(line_count)
    tfidf_matrix = csr_matrix((data, (rows, cols))).toarray()

    count = 0
    for ele in tfidf_matrix:
        # print(ele)
        # print(count)
        count = count + 1

    # cut label 1, mil label 0
    '''build the labels'''
    labels = np.zeros(sum(labels_count) + 1)
    for i in range(labels_count[0]):
        labels[i] = 1

    '''split into training and test sets'''
    rarray = np.random.random(size=line_count)
    x_train = []
    y_train = []
    x_test = []
    y_test = []
    for i in range(line_count - 1):
        if rarray[i] < 0.8:
            x_train.append(tfidf_matrix[i, :])
            y_train.append(labels[i])
        else:
            x_test.append(tfidf_matrix[i, :])
            y_test.append(labels[i])

    # x_train, x_test, y_train, y_test = train_test_split(tfidf_matrix, labels, test_size=0.3, random_state=0)

    '''train the LR classifier'''
    classifier = LogisticRegression()
    classifier.fit(x_train, y_train)

    # with open('LR_model.pkl', 'wb') as f:
    #     pickle.dump(classifier, f)

    print(classification_report(y_test, classifier.predict(x_test)))
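If the commented-out pickle.dump calls in textClsfy.py are re-enabled, a new document can later be classified along these lines; this is only a sketch under that assumption, with file names taken from the commented-out code and a made-up input text:

import pickle
import jieba
from scipy.sparse import csr_matrix
from common import rm_stop_words

# load the artefacts saved by textClsfy.py (assumes the pickle.dump lines were uncommented)
with open('dictionary.pkl', 'rb') as f:
    dictionary = pickle.load(f)
with open('tfidf_model.pkl', 'rb') as f:
    tfidf_model = pickle.load(f)
with open('LR_model.pkl', 'rb') as f:
    classifier = pickle.load(f)

text = '一條待分類的新聞文本'                        # made-up input document
word_list = rm_stop_words(list(jieba.cut(text, cut_all=False)))
doc_bow = dictionary.doc2bow(word_list)             # (word_id, count) pairs
doc_tfidf = tfidf_model[doc_bow]                    # (word_id, tf-idf weight) pairs

# densify into the same column layout the classifier was trained on
rows = [0] * len(doc_tfidf)
cols = [wid for wid, _ in doc_tfidf]
data = [w for _, w in doc_tfidf]
x = csr_matrix((data, (rows, cols)), shape=(1, classifier.coef_.shape[1])).toarray()
print(classifier.predict(x))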