[TOC]
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from matplotlib.font_manager import FontProperties
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
%matplotlib inline
font = FontProperties(fname='/Library/Fonts/Heiti.ttc')
iris_data = load_iris()
X = iris_data.data[:, [2, 3]]  # use only petal length and petal width
y = iris_data.target
# 山鳶尾 = setosa, 雜色鳶尾 = versicolor, 維吉尼亞鳶尾 = virginica
label_list = ['山鳶尾', '雜色鳶尾', '維吉尼亞鳶尾']
def plot_decision_regions(X, y, classifier=None):
    marker_list = ['o', 'x', 's']
    color_list = ['r', 'b', 'g']
    cmap = ListedColormap(color_list[:len(np.unique(y))])

    # evaluate the classifier on a dense grid to draw the decision surface
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    t1 = np.linspace(x1_min, x1_max, 666)
    t2 = np.linspace(x2_min, x2_max, 666)
    x1, x2 = np.meshgrid(t1, t2)
    y_hat = classifier.predict(np.array([x1.ravel(), x2.ravel()]).T)
    y_hat = y_hat.reshape(x1.shape)
    plt.contourf(x1, x2, y_hat, alpha=0.2, cmap=cmap)
    plt.xlim(x1_min, x1_max)
    plt.ylim(x2_min, x2_max)

    # overlay the training points, one marker and color per class
    for ind, clas in enumerate(np.unique(y)):
        plt.scatter(X[y == clas, 0], X[y == clas, 1], alpha=0.8, s=50,
                    c=color_list[ind], marker=marker_list[ind],
                    label=label_list[clas])
adbt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2, min_samples_split=20,
                                                 min_samples_leaf=5),
                          algorithm="SAMME", n_estimators=10, learning_rate=0.8)
adbt.fit(X, y)
AdaBoostClassifier(algorithm='SAMME',
                   base_estimator=DecisionTreeClassifier(class_weight=None,
                                                         criterion='gini',
                                                         max_depth=2,
                                                         max_features=None,
                                                         max_leaf_nodes=None,
                                                         min_impurity_decrease=0.0,
                                                         min_impurity_split=None,
                                                         min_samples_leaf=5,
                                                         min_samples_split=20,
                                                         min_weight_fraction_leaf=0.0,
                                                         presort=False,
                                                         random_state=None,
                                                         splitter='best'),
                   learning_rate=0.8, n_estimators=10, random_state=None)
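The `algorithm="SAMME"` argument selects the discrete SAMME variant, which updates sample weights from predicted class labels; scikit-learn also offers `"SAMME.R"`, which uses predicted class probabilities instead and often converges in fewer rounds. A minimal sketch comparing the two on the same data (this comparison is my addition, not part of the original post):

```python
# Compare the discrete (SAMME) and real (SAMME.R) AdaBoost variants.
base_tree = DecisionTreeClassifier(max_depth=2, min_samples_split=20,
                                   min_samples_leaf=5)
for algo in ["SAMME", "SAMME.R"]:
    clf = AdaBoostClassifier(base_tree, algorithm=algo,
                             n_estimators=10, learning_rate=0.8)
    clf.fit(X, y)
    print("{}: {:.4f}".format(algo, clf.score(X, y)))
```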
plot_decision_regions(X, y, classifier=adbt)
plt.xlabel('花瓣長度(cm)', fontproperties=font)
plt.ylabel('花瓣寬度(cm)', fontproperties=font)
plt.title('AdaBoost算法代碼(鳶尾花分類, n_e=10, l_r=0.8)', fontproperties=font, fontsize=20)
plt.legend(prop=font)
plt.show()
(Figure: decision regions of the AdaBoost classifier on the two petal features, n_estimators=10, learning_rate=0.8.)
print("Score:{}".format(adbt.score(X, y)))
Score:0.9866666666666667
adbt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2, min_samples_split=20,
                                                 min_samples_leaf=5),
                          algorithm="SAMME", n_estimators=300, learning_rate=0.8)
adbt.fit(X, y)
print("Score:{}".format(adbt.score(X, y)))
Score:0.9933333333333333
Because the dataset is small the effect may not be dramatic, but comparing with the previous model shows that, at the same learning rate (step size), more weak learners give a better fit; too many, however, risk overfitting. A sweep over n_estimators, sketched below, makes the trend easier to see.
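A minimal sketch, sweeping n_estimators at a fixed learning rate (the grid of sizes is my choice). Note these are training-set scores, since the post never splits off a test set; they measure fit, not generalization:

```python
# Sweep the number of weak learners at a fixed learning rate.
# Training-set scores only; a held-out set would be needed to detect overfitting.
for n in [10, 50, 100, 300]:
    clf = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2,
                                                    min_samples_split=20,
                                                    min_samples_leaf=5),
                             algorithm="SAMME", n_estimators=n, learning_rate=0.8)
    clf.fit(X, y)
    print("n_estimators={}: {:.4f}".format(n, clf.score(X, y)))
```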
adbt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2, min_samples_split=20,
                                                 min_samples_leaf=5),
                          algorithm="SAMME", n_estimators=300, learning_rate=0.001)
adbt.fit(X, y)
print("Score:{}".format(adbt.score(X, y)))
Score:0.9533333333333334
With the same number of iterations, comparing with the previous model shows that a larger learning rate fits the data better, at least on this training set: each weak learner's contribution is shrunk less per round, so the same 300 rounds move the ensemble further. The sweep over learning_rate sketched below shows the same pattern.
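A minimal sketch, sweeping the learning rate at a fixed number of weak learners (the grid of rates is my choice, not the author's):

```python
# Sweep the learning rate at a fixed ensemble size.
# Smaller steps need more boosting rounds to reach the same training fit.
for lr in [0.001, 0.01, 0.1, 0.8]:
    clf = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2,
                                                    min_samples_split=20,
                                                    min_samples_leaf=5),
                             algorithm="SAMME", n_estimators=300, learning_rate=lr)
    clf.fit(X, y)
    print("learning_rate={}: {:.4f}".format(lr, clf.score(X, y)))
```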
adbt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2, min_samples_split=20,
                                                 min_samples_leaf=5),
                          algorithm="SAMME", n_estimators=600, learning_rate=0.8)
adbt.fit(X, y)
print("Score:{}".format(adbt.score(X, y)))
Score:0.9933333333333333
Compared with the second model, the accuracy does not improve even though the number of iterations is doubled, so the algorithm had effectively already converged by n_estimators=300. The staged scores sketched below locate the convergence point from a single fit.
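Rather than refitting at several ensemble sizes, scikit-learn's `staged_score` reports the score after every boosting round of an already-fitted model. A minimal sketch on the 600-estimator model above (the "first round to reach the best score" logic is my addition):

```python
# Track the training score after each boosting round of the fitted model.
# staged_score yields one score per weak learner in the ensemble.
scores = list(adbt.staged_score(X, y))
best = max(scores)
first_best = scores.index(best) + 1  # 1-based round where the best score first appears
print("best score {:.4f} first reached after {} weak learners".format(best, first_best))
```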