源碼地址:https://pypi.org/project/slmethod/
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
%matplotlib inline
複製代碼
# X is the feature matrix, y the class labels: 40 samples (the original
# comment said 30, which contradicted n_samples=40), 2 features each,
# 2 output classes, no redundant features, one cluster per class,
# random_state=59 so the dataset is reproducible.
X, y = make_classification(n_samples=40,
                           n_features=2,
                           n_informative=2,
                           n_redundant=0,
                           n_classes=2,
                           n_clusters_per_class=1,
                           random_state=59)
複製代碼
# Map the {0, 1} labels to {-1, +1}, the convention the perceptron expects.
y = np.where(y == 1, 1, -1)
複製代碼
# Display the generated feature matrix.
X
複製代碼
array([[ 1.26654361, 1.20833252],
[ 2.06535223, -2.13177734],
[ 0.11811139, 0.76083319],
[ 0.90184139, -0.93177662],
[ 0.96240122, -0.99933393],
[ 0.32421018, -0.79732491],
[ 0.16465073, -0.67426018],
[-0.8393532 , 0.02946355],
[ 1.50737475, -1.62701845],
[ 1.81169336, -1.46844229],
[ 2.63399788, 0.97604937],
[ 1.59518059, -1.52110826],
[ 1.74505037, 1.46262955],
[ 1.86201667, 0.93060442],
[ 0.11905276, -0.52595242],
[ 1.48508147, -0.97794231],
[-0.92705557, 0.39367018],
[ 3.42983834, -2.72193889],
[ 1.99481682, 1.60244273],
[ 2.59197055, 1.29968242],
[ 1.60598522, -1.13518763],
[ 1.52118196, 1.12174498],
[ 0.13097699, -0.58481754],
[ 0.84318571, 0.65777867],
[ 1.0940501 , -0.02064728],
[ 0.36579428, 1.32704784],
[ 1.46113021, 1.11893705],
[ 0.42126239, -0.25448586],
[ 0.97213672, 0.63569881],
[ 0.81731191, 0.7569019 ],
[ 1.85348049, -1.36379487],
[ 0.81830809, 0.89762419],
[ 0.9559071 , -1.13200362],
[ 1.19788365, 0.24486292],
[ 0.43394253, 1.2190042 ],
[ 0.75245696, -1.00391468],
[ 0.26120942, -0.49444024],
[ 0.23915671, 1.72744475],
[ 1.33104015, 1.73132756],
[ 2.37528241, 1.47588251]])
複製代碼
# Display the remapped {-1, +1} labels.
y
複製代碼
array([ 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1,
-1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1,
1, -1, -1, 1, 1, 1])
複製代碼
# Scatter-plot the two features, colouring each point by its class label.
plt.scatter(x=X[:, 0], y=X[:, 1], c=y, s=40, marker='o')
複製代碼
<matplotlib.collections.PathCollection at 0x129908d68>
複製代碼
from sklearn.linear_model import Perceptron
複製代碼
# Train sklearn's Perceptron: learn a bias term, cap training at 1000
# epochs, and disable shuffling so the result is deterministic.
clf = Perceptron(fit_intercept=True,
                 max_iter=1000,
                 shuffle=False)
clf.fit(X, y)
複製代碼
Perceptron(alpha=0.0001, class_weight=None, early_stopping=False, eta0=1.0,
fit_intercept=True, max_iter=1000, n_iter_no_change=5, n_jobs=None,
penalty=None, random_state=0, shuffle=False, tol=0.001,
validation_fraction=0.1, verbose=0, warm_start=False)
複製代碼
# Learned weight vector (coefficients) of the sklearn model.
print(clf.coef_)
複製代碼
[[2.06165026 2.89632885]]
複製代碼
# Learned bias (intercept) of the sklearn model.
print(clf.intercept_)
複製代碼
[-1.]
複製代碼
# Overlay sklearn's decision boundary on the sample scatter.
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, marker='o')
# Boundary satisfies w0*x + w1*y + b = 0, i.e. y = -(w0*x + b) / w1;
# two x endpoints are enough to draw the line.
minX = X[:, 0].min()
maxX = X[:, 0].max()
x_points = np.array([minX, maxX])
w0, w1 = clf.coef_[0]
sklearn_y = -(w0 * x_points + clf.intercept_) / w1
plt.plot(x_points, sklearn_y, 'b-.', label='sklearn', linewidth=0.3)
複製代碼
[<matplotlib.lines.Line2D at 0x12bdbe320>]
複製代碼
from slmethod.perceptron import Perceptron
複製代碼
# slmethod's Perceptron in its original (primal) form.
origin_clf = Perceptron(dual=False)
origin_clf.fit(X, y)
複製代碼
# Learned weights of the primal-form model.
print(origin_clf.w)
複製代碼
[0.20616503 0.28963289]
複製代碼
# Learned bias of the primal-form model.
print(origin_clf.b)
複製代碼
-0.1
複製代碼
# slmethod's Perceptron in its dual form; should learn the same boundary.
dual_clf = Perceptron(dual=True)
dual_clf.fit(X, y)
複製代碼
# Learned weights of the dual-form model.
print(dual_clf.w)
複製代碼
[0.20616503 0.28963289]
複製代碼
# Learned bias of the dual-form model.
print(dual_clf.b)
複製代碼
-0.1
複製代碼
# Draw all three decision boundaries (sklearn, primal, dual) over the
# samples; each line is y = -(w0*x + b) / w1 evaluated at the two
# x endpoints stored in x_points.
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, marker='o')
sklearn_y = -(clf.coef_[0][0] * x_points + clf.intercept_) / clf.coef_[0][1]
origin_y = -(origin_clf.w[0] * x_points + origin_clf.b) / origin_clf.w[1]
dual_y = -(dual_clf.w[0] * x_points + dual_clf.b) / dual_clf.w[1]
for line_y, fmt, lbl in zip((sklearn_y, origin_y, dual_y),
                            ('b*', 'r.-', 'g-.'),
                            ('sklearn', 'origin', 'dual')):
    plt.plot(x_points, line_y, fmt, label=lbl)
plt.legend()
plt.show()
複製代碼
從上圖能夠看到,sklearn、原始形式和對偶形式的線幾乎重合了。