A Python Implementation of the ID3 Decision Tree Algorithm

Both Zhou Zhihua's "watermelon book" (Machine Learning) and Li Hang's Statistical Learning Methods explain the ID3 decision tree algorithm in detail. But how do we actually implement it? The core steps are as follows.

step1: Compute the Shannon entropy. For a dataset D with class proportions p_k, the Shannon entropy is H(D) = -Σ_k p_k · log2(p_k); the code below computes it from label counts.

from math import log
import operator


# Compute the Shannon entropy of a dataset
def calculate_entropy(data):
    label_counts = {}
    for feature_data in data:
        label = feature_data[-1]  # the last column is the class label
        if label not in label_counts:
            label_counts[label] = 0
        label_counts[label] += 1

    count = len(data)
    entropy = 0.0

    for key in label_counts:
        prob = float(label_counts[key]) / count
        entropy -= prob * log(prob, 2)  # H(D) = -sum(p_k * log2(p_k))
    return entropy
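
As a quick sanity check, here is a minimal usage sketch. The toy dataset below (two binary features plus a class label) is my own illustration, not part of the original post; it is reused in the later steps:

# Toy dataset: two binary features, class label in the last column
data = [[1, 1, 'yes'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 1, 'no']]
print(calculate_entropy(data))  # ~0.971 for a 2/5 vs 3/5 class split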

step2: Compute the information gain of a given feature, Gain(D, a) = H(D) - Σ_v (|D_v|/|D|) · H(D_v).

# Compute the information gain of one feature
# index: the column position of the feature in data
# entropy: the Shannon entropy of data as a whole
def calculate_relative_entropy(data, index, entropy):
    feat_list = [number[index] for number in data]  # all values of this feature (one column)
    unique_vals = set(feat_list)
    new_entropy = 0
    for value in unique_vals:
        sub_data = split_data(data, index, value)
        prob = len(sub_data) / float(len(data))
        new_entropy += prob * calculate_entropy(sub_data)  # weighted sum of subset entropies
    relative_entropy = entropy - new_entropy  # information gain = H(D) - H(D|feature)
    return relative_entropy
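
Continuing the toy dataset from step1 (again my own illustration; note this call depends on split_data, which is defined in the helper section of step4):

base = calculate_entropy(data)                    # ~0.971
print(calculate_relative_entropy(data, 0, base))  # ~0.420
print(calculate_relative_entropy(data, 1, base))  # ~0.171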

step3: Choose the feature with the maximum information gain.

# Choose the feature with the maximum information gain
def choose_max_relative_entropy(data):
    num_feature = len(data[0]) - 1
    base_entropy = calculate_entropy(data)  # Shannon entropy of the whole dataset
    best_infor_gain = 0
    best_feature = -1
    for i in range(num_feature):
        info_gain = calculate_relative_entropy(data, i, base_entropy)
        # keep the feature with the largest information gain
        if info_gain > best_infor_gain:
            best_infor_gain = info_gain
            best_feature = i

    return best_feature
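
On the toy dataset, feature 0 wins (gain ~0.420 vs ~0.171), so the split is made on column 0:

print(choose_max_relative_entropy(data))  # 0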

step4: Build the decision tree.

def create_decision_tree(data, labels):
    class_list = [example[-1] for example in data]
    # if all samples share one class, stop splitting
    if class_list.count(class_list[-1]) == len(class_list):
        return class_list[-1]
    # if every feature has been used up, return the majority class
    if len(data[0]) == 1:
        return most_class(class_list)
    # pick the feature with the highest information gain
    best_feat = choose_max_relative_entropy(data)
    best_feat_label = labels[best_feat]  # the name of that feature
    decision_tree = {best_feat_label: {}}  # the tree is a nested dict
    del labels[best_feat]  # remove the used feature name (note: mutates the caller's list)
    feat_values = [example[best_feat] for example in data]
    unique_values = set(feat_values)
    for value in unique_values:
        sub_labels = labels[:]
        # build the data subset for this value and recurse
        decision_tree[best_feat_label][value] = create_decision_tree(split_data(data, best_feat, value), sub_labels)
    return decision_tree
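
Putting it all together on the toy dataset (the feature names here are my own placeholders; since create_decision_tree deletes entries from labels, pass in a copy):

labels = ['no surfacing', 'flippers']
tree = create_decision_tree(data, labels[:])
print(tree)
# {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}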

Two helper functions are used while building the tree:

# when all features are exhausted, return the most common class
def most_class(class_list):
    class_count = {}
    for vote in class_list:
        if vote not in class_count:
            class_count[vote] = 0
        class_count[vote] += 1
    sorted_class_count = sorted(class_count.items(), key=operator.itemgetter(1), reverse=True)
    return sorted_class_count[0][0]
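
A one-line illustration of my own:

print(most_class(['yes', 'no', 'yes']))  # 'yes'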
    
# helper: given (dataset, feature index, feature value), return the subset
# of rows matching that value, with the feature column removed
def split_data(data, axis, value):
    ret_data = []
    for feat_vec in data:
        if feat_vec[axis] == value:
            reduce_feat_vec = feat_vec[:axis]
            reduce_feat_vec.extend(feat_vec[axis + 1:])
            ret_data.append(reduce_feat_vec)
    return ret_data
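
For instance, splitting the toy dataset on feature 0 with value 1 keeps the matching rows and drops that column:

print(split_data(data, 0, 1))
# [[1, 'yes'], [1, 'yes'], [0, 'no']]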