機器學習實戰-樸素貝葉斯

數據集參考自 https://blog.csdn.net/c406495762/article/details/77341116

樸素貝葉斯:首先,何爲樸素?樸素要求的是條件特徵之間相互獨立。咱們都知道大名鼎鼎的貝葉斯公式,其實樸素貝葉斯的思想很簡單:經過計算屬於某一類別的後驗機率,而後比較大小,哪一類的機率大,就將樣本劃分爲哪一類。

由上述公式可知,先驗機率 P(A) 很容易求得,咱們重點在於求條件機率。因爲條件特徵之間相互獨立,因而能夠拆分紅累乘的形式。在代碼實現中,咱們通常不會去求 P(B),由於對每一個類別而言分母都是同樣的,咱們只關注類別機率的相對大小。

 

將代碼記錄以下,方便之後查看。

 

import numpy as np
import pandas as pd
def getDataSet():
    postingList=[['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                 ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                 ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                 ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                 ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                 ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    labels = [0,1,0,1,0,1]
    return postingList,labels

#建立詞彙表
def createVocabList(dataSet):
    vocabSet = set([])
    for data in dataSet:
        vocabSet = vocabSet | set(data)
    return list(vocabSet)

#向量化
def vectorize(vocabSet,dataSet):
    vocab = [0] * len(vocabSet)
    for data in dataSet:
        vocab[vocabSet.index(data)] = 1
    return vocab


#樸素貝葉斯建模
def trainN(X_train,y_train):
    num = len(X_train)   #有多少記錄
    numvocab = len(X_train[0]) #詞向量的大小
    p0Num = np.ones(numvocab) #統計非侮辱類的相關單詞頻數 加入了拉普拉斯平滑
    p1Num = np.ones(numvocab) #統計侮辱類的相關單詞頻數
    p0Sum = 2
    p1Sum = 2
    pA = sum(y_train) / num                   #先驗機率
    for i in range(num):
        if y_train[i]==0:   #統計屬於非侮辱類的條件機率所需的數據
            p0Sum += sum(X_train[i])
            p0Num += X_train[i]
        else:               #統計屬於侮辱類的條件機率所需的數據
            p1Sum += sum(X_train[i])
            p1Num += X_train[i]

    # 爲了防止下溢出,計算條件機率的對數
    p0 = np.log(p0Num / p0Sum)      #頻數除以總數 獲得機率
    p1 = np.log(p1Num / p1Sum)
    return p0,p1,pA

#分類
def classify(testMat,p0,p1,pA):
    p0Score = sum(testMat * p0) + np.log(pA)
    p1Score = sum(testMat * p1) + np.log(1-pA)
    if p0Score > p1Score:
        return 0
    else:
        return 1

if __name__=='__main__':
    dataSet,label = getDataSet()
    vocabSet = createVocabList(dataSet)
    trainMat = []
    for elem in dataSet:
        trainMat.append(vectorize(vocabSet,elem))
    # print(trainMat)
    p0,p1,pA = trainN(trainMat,label)
    test1= ['love', 'my', 'dalmation']
    test2= ['stupid', 'garbage']
    test1_vocab = np.array(vectorize(vocabSet,test1))
    test2_vocab = np.array(vectorize(vocabSet,test2))
    result1 = classify(test1_vocab,p0,p1,pA)
    result2 = classify(test2_vocab,p0,p1,pA)
    if result1==1:
        print(test1,"屬於:侮辱類")
    else:
        print(test1, "屬於:非侮辱類")
    print("=======================================")
    if result2==1:
        print(test2,"屬於:侮辱類")
    else:
        print(test2, "屬於:非侮辱類")

結果如下:['love', 'my', 'dalmation'] 屬於:非侮辱類
=======================================
['stupid', 'garbage'] 屬於:侮辱類
相關文章
相關標籤/搜索