Naive Bayes

#Naive: assume the probability of each feature (word) appearing is unrelated to the words adjacent to it  #each feature carries equal weight
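In other words, for a document with words w1, w2, ..., wn, the model assumes the standard naive Bayes factorization

p(w1, w2, ..., wn | ci) = p(w1|ci) * p(w2|ci) * ... * p(wn|ci)

and a document is assigned whichever class ci maximizes p(ci) * p(w1, w2, ..., wn | ci).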
from numpy import *

def loadDataSet():
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    classVec = [0, 1, 0, 1, 0, 1]  # 1 = abusive post, 0 = normal post
    return postingList, classVec

# Build the vocabulary: the set of unique words across all documents
def createVocabList(dataSet):
    vocabSet = set([])  # start from an empty set
    for document in dataSet:
        vocabSet |= set(document)
    return list(vocabSet)

# Mark which vocabulary words appear in the input document
def setOfWordsVec(vocabSet, inputSet):
    returnVec = [0] * len(vocabSet)
    for word in inputSet:
        if word in vocabSet:
            returnVec[vocabSet.index(word)] = 1
        else:
            print("the word: %s is not in the Vocabulary!" % word)
    return returnVec

def trainNB0(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)  # number of training documents
    numWords = len(trainMatrix[0])   # vocabulary size of each document vector
    pAbusive = sum(trainCategory) / float(numTrainDocs)  # prior p(c1)
    p0Num = ones(numWords)  # start every word count at 1 (Laplace smoothing)
    p1Num = ones(numWords)
    # The book uses p0Denom = 2.0; p1Denom = 2.0, which I don't understand.
    # By Laplace smoothing, p(x1|c1) = (n1 + 1) / (n + N); for the
    # probabilities to sum to 1, N should be numWords.
    p0Denom = 1.0 * numWords
    p1Denom = 1.0 * numWords
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:  # document i is abusive
            p1Num += trainMatrix[i]  # element-wise vector addition
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    # log probabilities avoid underflow when many small terms are multiplied
    p1Vect = log(p1Num / p1Denom)
    p0Vect = log(p0Num / p0Denom)
    return p0Vect, p1Vect, pAbusive

def classifyNB(vecOClassify, p0Vec, p1Vec, p1Class):
    # in log space the product of probabilities becomes a sum
    p1 = sum(vecOClassify * p1Vec) + log(p1Class)
    p0 = sum(vecOClassify * p0Vec) + log(1 - p1Class)
    if p1 > p0:
        return 1
    else:
        return 0

def main():
    listOPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)  # vocabulary of unique words
    trainMat = []
    for postinDoc in listOPosts:
        trainMat.append(setOfWordsVec(myVocabList, postinDoc))
    p0V, p1V, pC1 = trainNB0(trainMat, listClasses)
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = array(setOfWordsVec(myVocabList, testEntry))
    print(testEntry, "classified as: ", classifyNB(thisDoc, p0V, p1V, pC1))
    testEntry = ['stupid', 'garbage']
    thisDoc = array(setOfWordsVec(myVocabList, testEntry))
    print(testEntry, "classified as: ", classifyNB(thisDoc, p0V, p1V, pC1))
    testEntry = ['stupid', 'cute', 'love', 'help']
    thisDoc = array(setOfWordsVec(myVocabList, testEntry))
    print(testEntry, "classified as: ", classifyNB(thisDoc, p0V, p1V, pC1))
main()
['love', 'my', 'dalmation'] classified as:  0
['stupid', 'garbage'] classified as:  1
['stupid', 'cute', 'love', 'help'] classified as:  0
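For the record, classifyNB works in log space: multiplying many small smoothed probabilities would underflow toward zero, so the standard identity

log(p(ci) * p(w1|ci) * ... * p(wn|ci)) = log p(ci) + log p(w1|ci) + ... + log p(wn|ci)

turns the product into exactly the sum that sum(vecOClassify * p1Vec) + log(p1Class) computes.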

After studying for a while, I've found that ML in Action really does focus on the code; there is very little mathematical derivation, so I constantly have to consult other references.

Link: Bayesian networks and Laplace smoothing

Link: Naive Bayes

After reading about Laplace smoothing, I think the book gets something wrong: to keep zero probabilities out, each numerator term gets +1, but it sets the denominators to p0Denom = 2.0; p1Denom = 2.0, and I don't see why.

After reading that article:

p(x1|c1) = (n1 + 1) / (n + N)

where N is the total number of distinct words, which makes the probabilities sum to 1. That is why I use numWords here.
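A quick sanity check of that claim (a toy example of my own, not from the book):

from numpy import array

counts = array([3.0, 0.0, 1.0, 2.0])  # toy word counts n1..nN for one class
N = len(counts)                        # vocabulary size (what I call numWords)
n = counts.sum()                       # total words observed in this class
probs = (counts + 1) / (n + N)         # Laplace-smoothed p(xk|c1)
print(probs.sum())                     # prints 1.0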


Putting what I learn straight to use feels good, haha.


Today's example: using Naive Bayes to filter spam email.

(Writing the formulas out in PPT is still the most convenient.)

I noticed that the code in the book differs somewhat from the code in its accompanying repository; I suspect something went wrong during translation.

def textParse(bigString):
    import re
    # split on runs of non-word characters, i.e. anything outside [a-zA-Z0-9_]
    # (the book's r'\W*' can match the empty string, which Python 3.7+ splits
    # on at every position; r'\W+' avoids that)
    listOTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOTokens if len(tok) > 2]

# Spam filter
def spamTest():
    docList = []; classList = []; fullText = []
    for i in range(1, 26):
        # email/spam/ holds the junk mail samples
        wordList = textParse(open('email/spam/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(open('email/ham/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)
    # hold out 10 of the 50 documents as a test set, train on the other 40
    trainingSet = list(range(50)); testSet = []
    for i in range(10):
        # random.uniform here comes from numpy; draw a random position
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        # deleting the chosen entry means no document index can repeat;
        # only the positions could, never the elements themselves
        del(trainingSet[randIndex])
    trainMax = []; trainClasses = []
    for docIndex in trainingSet:
        trainMax.append(bagOfWordsVec(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(array(trainMax), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:
        wordVector = bagOfWordsVec(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
            print("the error text %s" % docList[docIndex])
    print("error rate: %f " % (float(errorCount) / len(testSet)))

spamTest()