短文本分類-樸素貝葉斯

背景

一個類似智能客服機器人的場景:當用戶輸入一段文字的時候,判斷該文字的類型,根據類型給予回覆

前期準備知識

樸素貝葉斯(NBC)

樸素貝葉斯分類(NBC)是以貝葉斯定理爲基礎而且假設特徵條件之間相互獨立的方法,先經過已給定的訓練集,以特徵詞之間獨立做爲前提假設,學習從輸入到輸出的聯合機率分佈,再基於學習到的模型,對輸入 x 求出使得後驗機率最大的輸出 y

樸素貝葉斯的例子

有兩個箱子,A箱子和B箱子,A箱子中有100個小球,20白80黑,B箱子裏面有50個小球,40白10黑,求在已知小球出自 B 箱的條件下,取出白色小球的機率。

P(白色|B) = P(白色)*P(B|白色)/P(B)

P(白色) = (20+40)/(100+50) = 2/5 白色球的總數/全部球的總數

P(B) = 50/150 = 1/3 B箱球的個數/全部球的總數

P(B|白色) = 40/60 = 2/3 白色球中出自 B 箱的比例

P(白色|B) = P(白色) * P(B|白色)/P(B) = (2/5 * 2/3)/(1/3) = 4/5

使用條件機率來分類

  • 若是 p1(x, y) > p2(x, y), 那麼屬於類別 1;
  • 若是 p2(x, y) > p1(x, y), 那麼屬於類別 2;

開始

數據準備(文本分類數據以及類別)

# Training texts, paired 1:1 with the labels in classVec below.
dataSet = [
    "自動閃退",
    "太卡,閃退",
    "上傳視頻老是閃退",
    "添加字幕時會閃退",
    "不能夠下載高像素的視頻。",
    "我高清導不出怎麼辦",
    "不能保存好像素的視屏",
]
# One label per text. BUG FIX: the original listing had only six labels for
# seven texts — the label for the seventh text ("不能保存好像素的視屏")
# was missing, so len(classVec) != len(dataSet).
classVec = [
    "閃退",
    "閃退",
    "閃退",
    "閃退",
    "高清導出",
    "高清導出",
    "高清導出",
]
複製代碼

處理數據

  1. 對短文本進行分詞,去掉無用詞,造成每一個文本對特徵詞 方法:用jieba分詞後用停用詞過濾(停用詞表網上百度不少的)
import jieba


# 停用詞表 stop_words.txt 停用詞表
def get_stop_words():
    """Load the stop-word list from ./stop_words.txt (one word per line).

    The file is decoded as UTF-8 with an optional BOM; each line is
    stripped of surrounding whitespace before being collected.

    :return: list of stop words, in file order.
    """
    with open('./stop_words.txt', 'r', encoding='utf-8-sig') as handle:
        return [entry.strip() for entry in handle.readlines()]


# 分詞 停用
def segment_and_stop_word(word):
    """Tokenize *word* with jieba (full mode) and filter out stop words.

    Blank tokens ('' and ' ') are dropped along with anything that appears
    in the stop-word list loaded by get_stop_words().

    :param word: raw text to segment.
    :return: list of surviving tokens, in segmentation order.
    """
    blocked = get_stop_words()
    tokens = list(jieba.cut(word, cut_all=True))
    return [tok for tok in tokens if tok not in blocked and tok not in ('', ' ')]


# Training corpus: raw customer-support messages (tokenized later in main).
dataSet = [
    "自動閃退",
    "太卡,閃退",
    "上傳視頻老是閃退",
    "添加字幕時會閃退",
    "不能夠下載高像素的視頻。",
    "我高清導不出怎麼辦",
    "不能保存好像素的視屏",
]
# Numeric class label per text: 1 = crash (閃退), 2 = HD export (高清導出)
listClasses = [
    1,
    1,
    1,
    1,
    2,
    2,
    2,
]


if __name__ == "__main__":
    # 1. Load the data set: tokenize each raw text and drop stop words.
    newDataSet = []
    for i in dataSet:
        newDataSet.append(segment_and_stop_word(i))
複製代碼

newDataSet的值爲

[
        ['自動', '閃', '退'],
        ['太卡', '閃', '退'],
        ['上傳', '視頻', '老是', '閃', '退'],
        ['添加', '字幕', '時會', '閃', '退'],
        ['不可', '如下', '下載', '高像素', '像素', '視頻'],
        ['高清', '導', '不出'],
        ['不能', '保存', '好像', '像素', '視屏']
]
複製代碼
  1. 建立單詞集合
def createVocabList(dataSet=None):
    """Build the deduplicated vocabulary for a tokenized corpus.

    BUG FIX: the original took no parameter yet was later called as
    ``createVocabList(newDataSet)``, which raised TypeError.  The parameter
    defaults to the module-level ``dataSet`` so the historical zero-argument
    call keeps working, but callers should pass the tokenized corpus
    explicitly — iterating the raw strings splits them into single
    characters, which is almost certainly not intended.

    :param dataSet: iterable of token lists (e.g. the per-text output of
        segment_and_stop_word); defaults to the module-level dataSet.
    :return: list of unique tokens (order unspecified, set-derived).
    """
    if dataSet is None:
        dataSet = globals()["dataSet"]
    vocabSet = set()
    for document in dataSet:
        # Union each document's tokens into the running vocabulary.
        vocabSet |= set(document)
    return list(vocabSet)

if __name__ == "__main__":
    # 1. Load the data set: tokenize each raw text and drop stop words.
    newDataSet = []
    for i in dataSet:
        newDataSet.append(segment_and_stop_word(i))
    # 2. Build the deduplicated vocabulary.
    # NOTE(review): createVocabList as defined above takes no argument and
    # reads the raw (untokenized) module-level dataSet — verify this is the
    # intended corpus.
    vocabList = createVocabList()
複製代碼

vocabList

vocabList = ['高像素', '視屏', '不能', '添加', '保存', '時會', '導', '不可', '像素', '不出', '下載', '老是', '太卡', '字幕', '上傳', '退', '如下', '閃', '好像', '視頻', '自動', '高清']

複製代碼
  1. 計算單詞在類型中出現機率
def setOfWords2Vec(classId, vocabList, dataSet, classVec):
    """Score every vocabulary word against one class via Bayes' rule.

    Per word: p(a|b) = p(a) * p(b|a) / p(b), where a = "text belongs to
    classId" and b = "text contains the word".

    :param classId: class identifier being scored.
    :param vocabList: deduplicated token list.
    :param dataSet: tokenized training corpus (list of token lists).
    :param classVec: class label for each corpus entry.
    :return: dict mapping each word that occurs at least once inside the
        class to its stats ("pab" is the rounded posterior).
    """
    target = int(classId)
    # p(a): share of training texts carrying this class label.
    class_size = sum(1 for label in classVec if int(label) == target)
    prior = class_size / len(classVec)
    # Per word: [token occurrences anywhere, occurrences inside the class].
    tallies = {token: [0, 0] for token in vocabList}
    for idx, tokens in enumerate(dataSet):
        inside = int(classVec[idx]) == target
        for token in tokens:
            tallies[token][0] += 1
            if inside:
                tallies[token][1] += 1
    scored = {}
    for token, (everywhere, inside_class) in tallies.items():
        # Words never seen inside the class carry no evidence for it.
        if everywhere and inside_class:
            pb = everywhere / len(classVec)
            pba = inside_class / class_size
            # p(a|b) = p(a) * p(b|a) / p(b)
            scored[token] = {
                "pab": round(prior * pba / pb, 4),
                "pa_num": class_size,
                "pb_num": everywhere,
                "pb_in_num": inside_class,
                "pb": pb,
                "pa": prior,
            }
    return scored
    
if __name__ == "__main__":
    # 1. Load the data set: tokenize each raw text and drop stop words.
    newDataSet = []
    for i in dataSet:
        newDataSet.append(segment_and_stop_word(i))
    # 2. Build the deduplicated vocabulary.
    vocabList = createVocabList()
    # 3. Per-class word probabilities.
    wordJson = {}
    classListSet = set(listClasses)
    for i in classListSet:
        # BUG FIX: the original passed the undefined name `listOPosts`
        # (NameError at runtime); the tokenized corpus is `newDataSet`.
        wordJson[i] = setOfWords2Vec(i, vocabList, newDataSet, listClasses)
複製代碼

wordJson 就是每一個特徵詞(vocabList)在各個類型中的機率

{
  1: {
    '時會': {
      'pab': 1.0
    },
    '添加': {
      'pab': 1.0
    },
    '閃': {
      'pab': 1.0
    },
    '視頻': {
      'pab': 0.5
    },
    '退': {
      'pab': 1.0
    },
    '老是': {
      'pab': 1.0
    },
    '字幕': {
      'pab': 1.0
    },
    '上傳': {
      'pab': 1.0
    },
    '太卡': {
      'pab': 1.0
    },
    '自動': {
      'pab': 1.0
    }
  },
  2: {
    '好像': {
      'pab': 1.0
    },
    '高清': {
      'pab': 1.0
    },
    '如下': {
      'pab': 1.0
    },
    '不可': {
      'pab': 1.0
    },
    '視頻': {
      'pab': 0.5
    },
    '高像素': {
      'pab': 1.0
    },
    '不能': {
      'pab': 1.0
    },
    '下載': {
      'pab': 1.0
    },
    '視屏': {
      'pab': 1.0
    },
    '導': {
      'pab': 1.0
    },
    '像素': {
      'pab': 1.0
    },
    '保存': {
      'pab': 1.0
    },
    '不出': {
      'pab': 1.0
    }
  }
}

複製代碼

接下來就是測試了!!!!!

def getTestWordClassId(wordJson, words, text):
    """Pick the class whose matched-word 'pab' scores sum highest.

    For each class, the 'pab' values of every test token present in that
    class's word table are summed; the class with the strictly largest sum
    wins.  With no matches at all, class 1 is returned as the default.

    :param wordJson: mapping class id -> {word: {'pab': float, ...}}.
    :param words: tokenized test text.
    :param text: original raw text (unused; kept for interface stability).
    :return: winning class id as an int.
    """
    best_score = 0
    best_class = 1
    for candidate, word_probs in wordJson.items():
        score = sum(word_probs[tok]['pab'] for tok in words if tok in word_probs)
        if float(score) > float(best_score):
            best_score = score
            best_class = candidate
    return int(best_class)

if __name__ == "__main__":
    # 1. Tokenize each raw text and drop stop words.
    newDataSet = []
    for i in dataSet:
        newDataSet.append(segment_and_stop_word(i))
    # NOTE(review): createVocabList as defined above takes no argument —
    # this one-argument call raises TypeError; verify which definition is
    # intended.
    vocabList = createVocabList(newDataSet)
    # 3. Per-class word probabilities.
    wordJson = {}
    classListSet = set(listClasses)
    for i in classListSet:
        wordJson[i] = setOfWords2Vec(i, vocabList, newDataSet, listClasses)

    testDataSet = newDataSet  # test set (reuses the training set)
    classResult = {}
    trainTextAndIds = []
    for i in range(len(dataSet)):
        trainTextAndIds.append({
            "id": i,
            "text": dataSet[i],
        })
    # Classify every test text and record true vs. predicted class.
    for i in range(len(testDataSet)):
        classResult[trainTextAndIds[i]['id']] = {
            "train_id": listClasses[i],
            "test_id": getTestWordClassId(wordJson, testDataSet[i], trainTextAndIds[i]['text']),
            "id": trainTextAndIds[i]['id'],
            "text": trainTextAndIds[i]['text']
        }
複製代碼
  1. 計算召回率正確率
# 計算準確率召回率
def getRate(dataSet, classVec):
    rates = {}
    for i in classVec:
        rates[i] = {
            'TP': 0, 'FN': 0, 'FP': 0, 'TN': 0
        }
    for i in dataSet:
        if dataSet[i]['train_id'] == dataSet[i]['test_id']:
            rates[dataSet[i]['train_id']]['TP'] += 1  # TP: 將正類預測爲正類數
        else:
            rates[dataSet[i]['train_id']]['FN'] += 1  # FN: 將正類預測爲負類數
            rates[dataSet[i]['test_id']]['FP'] += 1  # FP: 將負類預測爲正類數
    for i in rates:
        rates[i]['TN'] = len(dataSet) - rates[i]['TP'] - rates[i]['FP'] - rates[i]['FN']  # TN: 將負類預測爲負類數
    accuracy_recall_list = []
    for i in rates:
        row = rates[i]
        _row = {
            "tag_id": i,
            "recall": 0.0 if row['TP'] + row['FN'] == 0 else round(row['TP'] / (row['TP'] + row['FN']), 4),
            "accuracy": round((row['TP'] + row['TN']) / (row['TP'] + row['FP'] + row['TN'] + row['FN']), 4),
            "row": row
        }
        accuracy_recall_list.append(_row)
    return accuracy_recall_list
    
if __name__ == "__main__":
    # 1. Tokenize each raw text and drop stop words.
    newDataSet = []
    for i in dataSet:
        newDataSet.append(segment_and_stop_word(i))
    # NOTE(review): createVocabList as defined above takes no argument —
    # this one-argument call raises TypeError; verify which definition is
    # intended.
    vocabList = createVocabList(newDataSet)
    # 3. Per-class word probabilities.
    wordJson = {}
    classListSet = set(listClasses)
    for i in classListSet:
        wordJson[i] = setOfWords2Vec(i, vocabList, newDataSet, listClasses)

    testDataSet = newDataSet  # test set (reuses the training set)
    classResult = {}
    trainTextAndIds = []
    for i in range(len(dataSet)):
        trainTextAndIds.append({
            "id": i,
            "text": dataSet[i],
        })
    # Classify every test text and record true vs. predicted class.
    for i in range(len(testDataSet)):
        classResult[trainTextAndIds[i]['id']] = {
            "train_id": listClasses[i],
            "test_id": getTestWordClassId(wordJson, testDataSet[i], trainTextAndIds[i]['text']),
            "id": trainTextAndIds[i]['id'],
            "text": trainTextAndIds[i]['text']
        }

    # 4. Compute recall and accuracy per class.
    accuracy_recall_list = getRate(classResult, set(listClasses))
複製代碼

accuracy_recall_list

[
  {
    'tag_id': 1,
    'recall': 1.0,
    'accuracy': 1.0,
    'row': {
      'TP': 4,
      'FN': 0,
      'FP': 0,
      'TN': 3
    }
  },
  {
    'tag_id': 2,
    'recall': 1.0,
    'accuracy': 1.0,
    'row': {
      'TP': 3,
      'FN': 0,
      'FP': 0,
      'TN': 4
    }
  }
]
複製代碼

結束

由於訓練集的數據過少,分類比較明確,因此準確率和召回率很高,可是方法差很少,能夠用大量的訓練集來測試

相關文章
相關標籤/搜索