```python
# -*- coding: UTF-8 -*-
import numpy as np
from functools import reduce

"""
Function: create the sample data set
Parameters:
    None
Returns:
    postingList - tokenized documents of the sample data set
    classVec - class label vector
"""
def loadDataSet():
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],       # tokenized documents
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    classVec = [0, 1, 0, 1, 0, 1]            # class labels: 1 means abusive, 0 means not abusive
    return postingList, classVec             # return the tokenized documents and the class label vector

"""
Function: build a vocabulary list of unique words from the tokenized documents
Parameters:
    dataSet - the sample data set
Returns:
    vocabSet - the list of unique words, i.e. the vocabulary
"""
def createVocabList(dataSet):
    vocabSet = set([])                       # create an empty set
    for document in dataSet:
        vocabSet = vocabSet | set(document)  # take the union
    return list(vocabSet)

"""
Function: vectorize inputSet against vocabList; each element of the vector is 1 or 0
Parameters:
    vocabList - the list returned by createVocabList
    inputSet - a tokenized document
Returns:
    returnVec - document vector (set-of-words model)
"""
def setOfWords2Vec(vocabList, inputSet):
    returnVec = [0] * len(vocabList)                     # create a vector of zeros
    for word in inputSet:                                # iterate over every word
        if word in vocabList:                            # if the word is in the vocabulary, set its slot to 1
            returnVec[vocabList.index(word)] = 1
        else:
            print("the word: %s is not in my Vocabulary!" % word)
    return returnVec                                     # return the document vector

"""
Function: naive Bayes classifier training function
Parameters:
    trainMatrix - training document matrix, i.e. the matrix built from the returnVec vectors of setOfWords2Vec
    trainCategory - training class label vector, i.e. classVec from loadDataSet
Returns:
    p0Vect - conditional probability array of the non-abusive class
    p1Vect - conditional probability array of the abusive class
    pAbusive - probability that a document is abusive
"""
def trainNB0(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)                          # number of training documents
    numWords = len(trainMatrix[0])                           # number of entries per document vector
    pAbusive = sum(trainCategory) / float(numTrainDocs)      # probability that a document is abusive
    p0Num = np.zeros(numWords); p1Num = np.zeros(numWords)   # create numpy.zeros count arrays
    p0Denom = 0.0; p1Denom = 0.0                             # initialize denominators to 0.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:                            # accumulate counts for the abusive class: P(w0|1), P(w1|1), P(w2|1)...
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:                                                # accumulate counts for the non-abusive class: P(w0|0), P(w1|0), P(w2|0)...
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    p1Vect = p1Num / p1Denom                                 # element-wise division
    p0Vect = p0Num / p0Denom
    return p0Vect, p1Vect, pAbusive                          # return both conditional probability arrays and the abusive-class prior

"""
Function: naive Bayes classification function
Parameters:
    vec2Classify - the document vector to classify
    p0Vec - conditional probability array of the non-abusive class
    p1Vec - conditional probability array of the abusive class
    pClass1 - probability that a document is abusive
Returns:
    0 - non-abusive
    1 - abusive
"""
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    p1 = reduce(lambda x, y: x * y, vec2Classify * p1Vec) * pClass1          # element-wise multiplication, then product
    p0 = reduce(lambda x, y: x * y, vec2Classify * p0Vec) * (1.0 - pClass1)
    print('p0:', p0)
    print('p1:', p1)
    if p1 > p0:
        return 1
    else:
        return 0

"""
Function: test the naive Bayes classifier
Parameters:
    None
Returns:
    None
"""
def testingNB():
    listOPosts, listClasses = loadDataSet()                              # create the sample data set
    myVocabList = createVocabList(listOPosts)                            # build the vocabulary
    trainMat = []
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))          # vectorize the training samples
    p0V, p1V, pAb = trainNB0(np.array(trainMat), np.array(listClasses))  # train the naive Bayes classifier
    testEntry = ['love', 'my', 'dalmation']                              # test sample 1
    thisDoc = np.array(setOfWords2Vec(myVocabList, testEntry))           # vectorize the test sample
    if classifyNB(thisDoc, p0V, p1V, pAb):
        print(testEntry, 'classified as abusive')                        # classify and print the result
    else:
        print(testEntry, 'classified as non-abusive')
    testEntry = ['stupid', 'garbage']                                    # test sample 2
    thisDoc = np.array(setOfWords2Vec(myVocabList, testEntry))           # vectorize the test sample
    if classifyNB(thisDoc, p0V, p1V, pAb):
        print(testEntry, 'classified as abusive')
    else:
        print(testEntry, 'classified as non-abusive')

if __name__ == '__main__':
    testingNB()
```
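Note that `classifyNB` above multiplies every element of `vec2Classify * p1Vec`, so any vocabulary word absent from the test document (or never seen in a class during training) contributes a factor of 0, and both `p0` and `p1` print as 0. A common refinement is Laplace smoothing plus log probabilities. The sketch below is not part of the code above; the function names `trainNB0_smoothed` and `classifyNB_log` are hypothetical variants added only for illustration:

```python
import numpy as np

def trainNB0_smoothed(trainMatrix, trainCategory):
    # Same counting as trainNB0, but with Laplace smoothing and log probabilities.
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory) / float(numTrainDocs)
    p0Num = np.ones(numWords); p1Num = np.ones(numWords)   # initialize counts to 1 (Laplace smoothing)
    p0Denom = 2.0; p1Denom = 2.0                           # initialize denominators to 2
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]; p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]; p0Denom += sum(trainMatrix[i])
    return np.log(p0Num / p0Denom), np.log(p1Num / p1Denom), pAbusive

def classifyNB_log(vec2Classify, p0Vec, p1Vec, pClass1):
    # Sum the log probabilities of the words present in the document, then add the log prior.
    p1 = sum(vec2Classify * p1Vec) + np.log(pClass1)
    p0 = sum(vec2Classify * p0Vec) + np.log(1.0 - pClass1)
    return 1 if p1 > p0 else 0
```

Because absent words now contribute 0 to a sum of logs rather than a factor of 0 to a product, the two scores stay comparable and numerical underflow is avoided.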
2.2 Implementation with sklearn
The naive Bayes classes in scikit-learn are fairly straightforward to use. Compared with algorithms such as decision trees and KNN, naive Bayes has relatively few parameters to worry about.
scikit-learn provides three naive Bayes classifier classes: GaussianNB, MultinomialNB, and BernoulliNB. GaussianNB assumes the features follow a Gaussian distribution within each class, MultinomialNB assumes multinomially distributed (count) features, and BernoulliNB assumes binary (Bernoulli) features.
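A minimal sketch of how the three classes are called; all three share the same `fit`/`predict`/`score` API, and the toy arrays below are made up purely for illustration:

```python
import numpy as np
from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB

# Hypothetical toy data: 4 samples, 3 features, 2 classes.
X_counts = np.array([[2, 1, 0], [3, 0, 1], [0, 2, 4], [1, 0, 5]])  # non-negative counts -> MultinomialNB
X_binary = (X_counts > 0).astype(int)                              # 0/1 presence features -> BernoulliNB
X_real = np.array([[0.1, 2.3, -1.0], [0.3, 1.9, -0.8],
                   [2.2, 0.1, 1.5], [2.0, 0.4, 1.8]])              # continuous features -> GaussianNB
y = np.array([0, 0, 1, 1])

for name, clf, X in [('GaussianNB', GaussianNB(), X_real),
                     ('MultinomialNB', MultinomialNB(), X_counts),
                     ('BernoulliNB', BernoulliNB(), X_binary)]:
    clf.fit(X, y)                         # train on the toy data
    print(name, clf.predict(X[:1]))       # predict the class of the first sample
```

The news classifier below uses MultinomialNB, which is the usual choice for word-occurrence features.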
```python
# -*- coding: UTF-8 -*-
from sklearn.naive_bayes import MultinomialNB
import matplotlib.pyplot as plt
import os
import random
import jieba

"""
Function: Chinese text preprocessing
Parameters:
    folder_path - path of the folder holding the text files
    test_size - proportion of the data used as the test set, 20% by default
Returns:
    all_words_list - training-set vocabulary sorted by word frequency in descending order
    train_data_list - training set
    test_data_list - test set
    train_class_list - training-set labels
    test_class_list - test-set labels
"""
def TextProcessing(folder_path, test_size=0.2):
    folder_list = os.listdir(folder_path)                # list the subfolders of folder_path
    data_list = []                                       # documents of the data set
    class_list = []                                      # labels of the data set
    # iterate over every subfolder
    for folder in folder_list:
        new_folder_path = os.path.join(folder_path, folder)  # build the path of the subfolder
        files = os.listdir(new_folder_path)              # txt files inside the subfolder
        j = 1
        # iterate over every txt file
        for file in files:
            if j > 100:                                  # keep at most 100 samples per class
                break
            with open(os.path.join(new_folder_path, file), 'r', encoding='utf-8') as f:  # open the txt file
                raw = f.read()
            word_cut = jieba.cut(raw, cut_all=False)     # precise mode, returns an iterable generator
            word_list = list(word_cut)                   # convert the generator to a list
            data_list.append(word_list)                  # add the document
            class_list.append(folder)                    # add its label
            j += 1

    data_class_list = list(zip(data_list, class_list))   # zip documents and labels together
    random.shuffle(data_class_list)                      # shuffle data_class_list
    index = int(len(data_class_list) * test_size) + 1    # index that splits the training and test sets
    train_list = data_class_list[index:]                 # training set
    test_list = data_class_list[:index]                  # test set
    train_data_list, train_class_list = zip(*train_list) # unzip the training set
    test_data_list, test_class_list = zip(*test_list)    # unzip the test set

    all_words_dict = {}                                  # word frequencies of the training set
    for word_list in train_data_list:
        for word in word_list:
            if word in all_words_dict.keys():
                all_words_dict[word] += 1
            else:
                all_words_dict[word] = 1

    # sort by value (frequency) in descending order
    all_words_tuple_list = sorted(all_words_dict.items(), key=lambda f: f[1], reverse=True)
    all_words_list, all_words_nums = zip(*all_words_tuple_list)  # unzip
    all_words_list = list(all_words_list)                # convert to a list
    return all_words_list, train_data_list, test_data_list, train_class_list, test_class_list

"""
Function: read the contents of a file and deduplicate them
Parameters:
    words_file - path of the file
Returns:
    words_set - set of the words read from the file
"""
def MakeWordsSet(words_file):
    words_set = set()                                    # create an empty set
    with open(words_file, 'r', encoding='utf-8') as f:   # open the file
        for line in f.readlines():                       # read it line by line
            word = line.strip()                          # strip the newline
            if len(word) > 0:                            # if the line is non-empty, add it to words_set
                words_set.add(word)
    return words_set                                     # return the result

"""
Function: vectorize the documents according to feature_words
Parameters:
    train_data_list - training set
    test_data_list - test set
    feature_words - feature words
Returns:
    train_feature_list - vectorized training set
    test_feature_list - vectorized test set
"""
def TextFeatures(train_data_list, test_data_list, feature_words):
    def text_features(text, feature_words):              # set an element to 1 if the feature word appears in the document
        text_words = set(text)
        features = [1 if word in text_words else 0 for word in feature_words]
        return features
    train_feature_list = [text_features(text, feature_words) for text in train_data_list]
    test_feature_list = [text_features(text, feature_words) for text in test_data_list]
    return train_feature_list, test_feature_list         # return the results

"""
Function: feature selection
Parameters:
    all_words_list - vocabulary of the training set
    deleteN - number of the most frequent words to delete
    stopwords_set - the stopword set
Returns:
    feature_words - feature words
"""
def words_dict(all_words_list, deleteN, stopwords_set=set()):
    feature_words = []                                   # list of feature words
    n = 1
    for t in range(deleteN, len(all_words_list), 1):
        if n > 1000:                                     # keep at most 1000 feature words
            break
        # a word is kept as a feature if it is not a number, not a stopword, and its length is between 2 and 4
        if not all_words_list[t].isdigit() and all_words_list[t] not in stopwords_set and 1 < len(all_words_list[t]) < 5:
            feature_words.append(all_words_list[t])
        n += 1
    return feature_words

"""
Function: news classifier
Parameters:
    train_feature_list - vectorized training documents
    test_feature_list - vectorized test documents
    train_class_list - training-set labels
    test_class_list - test-set labels
Returns:
    test_accuracy - classifier accuracy on the test set
"""
def TextClassifier(train_feature_list, test_feature_list, train_class_list, test_class_list):
    classifier = MultinomialNB().fit(train_feature_list, train_class_list)
    test_accuracy = classifier.score(test_feature_list, test_class_list)
    return test_accuracy

if __name__ == '__main__':
    # text preprocessing
    folder_path = './SogouC/Sample'                      # folder holding the training data
    all_words_list, train_data_list, test_data_list, train_class_list, test_class_list = TextProcessing(folder_path, test_size=0.2)

    # build stopwords_set
    stopwords_file = './stopwords_cn.txt'
    stopwords_set = MakeWordsSet(stopwords_file)

    test_accuracy_list = []
    deleteNs = range(0, 1000, 20)                        # 0 20 40 60 ... 980
    for deleteN in deleteNs:
        feature_words = words_dict(all_words_list, deleteN, stopwords_set)
        train_feature_list, test_feature_list = TextFeatures(train_data_list, test_data_list, feature_words)
        test_accuracy = TextClassifier(train_feature_list, test_feature_list, train_class_list, test_class_list)
        test_accuracy_list.append(test_accuracy)

    # ave = lambda c: sum(c) / len(c)
    # print(ave(test_accuracy_list))

    plt.figure()
    plt.plot(deleteNs, test_accuracy_list)
    plt.title('Relationship of deleteNs and test_accuracy')
    plt.xlabel('deleteNs')
    plt.ylabel('test_accuracy')
    plt.show()
```
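If you want a numeric summary in addition to the plot, a small follow-up sketch (not part of the original script, but reusing the commented-out averaging idea) can be appended at the end of the `__main__` block to report the mean accuracy and the best-performing deleteN:

```python
    # Hypothetical follow-up: summarize the accuracy curve computed above.
    ave = lambda c: sum(c) / len(c)
    print('average test accuracy: %.4f' % ave(test_accuracy_list))

    best_index = max(range(len(test_accuracy_list)), key=lambda i: test_accuracy_list[i])
    print('best deleteN = %d, accuracy = %.4f' % (deleteNs[best_index], test_accuracy_list[best_index]))
```

Keep in mind that the train/test split is reshuffled on every run, so the curve and the best deleteN will vary between runs.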