$$z^{(i)} = w^T x^{(i)} + b $$
where $x^{(i)}$ is the $i$-th training example.
$$\hat{y}^{(i)} = a^{(i)} = \mathrm{sigmoid}(z^{(i)})$$
$$ \mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \log(a^{(i)}) - (1-y^{(i)} ) \log(1-a^{(i)})$$
$$ J = \frac{1}{m} \sum_{i=1}^m \mathcal{L}(a^{(i)}, y^{(i)})$$
$J$ is the overall cost: the average of the per-example losses, measuring how far the predictions are from the desired outputs.
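As a concrete illustration (the probabilities here are arbitrary): for a positive example ($y^{(i)} = 1$) the loss reduces to $-\log(a^{(i)})$, so

$$\mathcal{L}(0.9, 1) = -\log(0.9) \approx 0.105, \qquad \mathcal{L}(0.1, 1) = -\log(0.1) \approx 2.303,$$

i.e. a confident wrong prediction is penalized far more heavily than a confident correct one.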
```python
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset  # loads the dataset

%matplotlib inline
```
$$\mathrm{sigmoid}(w^T x + b) = \frac{1}{1 + e^{-(w^T x + b)}}$$
```python
def sigmoid(z):
    # z -- a scalar or numpy array of any size
    s = 1 / (1 + np.exp(-z))
    return s  # s -- sigmoid(z)
```
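A quick sanity check (the expected values follow directly from the formula, e.g. $\mathrm{sigmoid}(0) = 0.5$):

```python
print(sigmoid(np.array([0, 2])))  # expected: [0.5        0.88079708]
```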
```python
def initialize_with_zeros(dim):
    # dim -- the size of the w vector we want (i.e. the number of parameters)
    w = np.zeros((dim, 1))
    b = 0
    assert(w.shape == (dim, 1))
    assert(isinstance(b, float) or isinstance(b, int))
    # w -- initialized zero vector of shape (dim, 1)
    # b -- initialized scalar (the bias)
    return w, b
```
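For example, a minimal check of the initializer:

```python
w, b = initialize_with_zeros(2)
print(w)  # [[0.]
          #  [0.]]
print(b)  # 0
```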
$$A = \sigma(w^T X + b) = (a^{(1)}, a^{(2)}, \ldots, a^{(m)})$$
$$J = -\frac{1}{m}\sum_{i=1}^{m}\left[y^{(i)}\log(a^{(i)})+(1-y^{(i)})\log(1-a^{(i)})\right]$$
$$ \frac{\partial J}{\partial w} = \frac{1}{m}X(A-Y)^T$$
$$ \frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^m (a^{(i)}-y^{(i)})$$
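These two expressions are the vectorized form of the chain rule. For a single example, combining $\partial\mathcal{L}/\partial a^{(i)}$ with the sigmoid derivative $a^{(i)}(1-a^{(i)})$ gives

$$\frac{\partial \mathcal{L}}{\partial z^{(i)}} = \left(-\frac{y^{(i)}}{a^{(i)}} + \frac{1-y^{(i)}}{1-a^{(i)}}\right) a^{(i)}\left(1-a^{(i)}\right) = a^{(i)} - y^{(i)},$$

and averaging these per-example terms over the dataset yields the matrix expressions above.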
```python
def propagate(w, b, X, Y):
    # w -- weights, a numpy array of shape (num_px * num_px * 3, 1)
    # b -- bias, a scalar
    # X -- data of shape (num_px * num_px * 3, number of examples)
    # Y -- label vector of shape (1, number of examples) (0 if non-cat, 1 if cat)
    m = X.shape[1]  # number of examples (columns of the dataset)

    # forward propagation
    A = sigmoid(np.dot(w.T, X) + b)
    cost = (-1/m) * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))

    # backward propagation
    dw = 1/m * np.dot(X, (A - Y).T)
    db = 1/m * np.sum(A - Y)

    assert(dw.shape == w.shape)
    assert(db.dtype == float)
    cost = np.squeeze(cost)
    assert(cost.shape == ())

    grads = {"dw": dw, "db": db}
    # cost -- negative log-likelihood cost for logistic regression
    # dw -- gradient of the loss with respect to w, same shape as w
    # db -- gradient of the loss with respect to b, same shape as b
    return grads, cost
```
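Before moving on, a finite-difference gradient check can catch bugs in `propagate`. This is a sketch on tiny made-up inputs (the values of `w`, `b`, `X`, `Y` below are arbitrary):

```python
w = np.array([[1.], [2.]])
b = 2.
X = np.array([[1., 2.], [3., 4.]])
Y = np.array([[1., 0.]])

grads, cost = propagate(w, b, X, Y)

# numerically estimate dJ/dw[0] with a centered difference
eps = 1e-6
w_plus, w_minus = w.copy(), w.copy()
w_plus[0, 0] += eps
w_minus[0, 0] -= eps
_, cost_plus = propagate(w_plus, b, X, Y)
_, cost_minus = propagate(w_minus, b, X, Y)
approx = (cost_plus - cost_minus) / (2 * eps)

print(grads["dw"][0, 0], approx)  # the two numbers should match to several decimal places
```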
$$\theta = \theta - \alpha \, d\theta$$
```python
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
    # w -- weights, a numpy array of shape (num_px * num_px * 3, 1)
    # b -- bias, a scalar
    # X -- data of shape (num_px * num_px * 3, number of examples)
    # Y -- label vector of shape (1, number of examples) (0 if non-cat, 1 if cat)
    # num_iterations -- number of iterations of the optimization loop
    # learning_rate -- learning rate of the gradient descent update rule
    # print_cost -- True to print the cost every 100 steps
    costs = []
    for i in range(num_iterations):
        grads, cost = propagate(w, b, X, Y)
        dw = grads["dw"]
        db = grads["db"]
        # gradient descent update
        w = w - learning_rate * dw
        b = b - learning_rate * db
        # record the cost every 100 iterations
        if i % 100 == 0:
            costs.append(cost)
        if print_cost and i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))
    params = {"w": w, "b": b}
    grads = {"dw": dw, "db": db}
    # params -- dictionary containing w and b
    # grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
    # costs -- list of all the costs computed during the optimization; used to plot the learning curve
    return params, grads, costs
```
$$\hat{Y} = A = \sigma(w^T X + b)$$
```python
def predict(w, b, X):
    # w -- weights, a numpy array of shape (num_px * num_px * 3, 1)
    # b -- bias, a scalar
    # X -- data of shape (num_px * num_px * 3, number of examples)
    m = X.shape[1]
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)
    # compute the vector A of probabilities that each picture contains a cat
    A = sigmoid(np.dot(w.T, X) + b)
    for i in range(A.shape[1]):
        # threshold the probability at 0.5
        if A[0, i] <= 0.5:
            Y_prediction[0, i] = 0
        else:
            Y_prediction[0, i] = 1
    assert(Y_prediction.shape == (1, m))
    # Y_prediction -- numpy array (vector) containing all predictions (0/1) for the examples in X
    return Y_prediction
```
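A quick check of `predict` on made-up parameters and inputs (all values below are arbitrary):

```python
w = np.array([[0.1], [0.4]])
b = -0.3
X = np.array([[1., -1.1], [1.2, -2.]])
print(predict(w, b, X))  # expected: [[1. 0.]]
```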
```python
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
    '''
    Builds the logistic regression model by calling the functions implemented above.

    Arguments:
    X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
    X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
    num_iterations -- hyperparameter: number of iterations used to optimize the parameters
    learning_rate -- hyperparameter: learning rate used in the update rule of optimize()
    print_cost -- set to True to print the cost every 100 iterations

    Returns:
    d -- dictionary containing information about the model.
    '''
    # initialize parameters with zeros
    w, b = initialize_with_zeros(X_train.shape[0])

    # gradient descent
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)

    # retrieve the parameters w and b from the dictionary "parameters"
    w = parameters["w"]
    b = parameters["b"]

    # predict on the test/train set examples
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)

    # print train/test accuracy
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))

    d = {"costs": costs,
         "Y_prediction_test": Y_prediction_test,
         "Y_prediction_train": Y_prediction_train,
         "w": w,
         "b": b,
         "learning_rate": learning_rate,
         "num_iterations": num_iterations}
    return d
```
```python
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()

# flatten each (num_px, num_px, 3) image into a single column
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T

# standardize the pixel values to [0, 1]
train_set_x = train_set_x_flatten / 255.
test_set_x = test_set_x_flatten / 255.

d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)
```
```python
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
```
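To see how sensitive training is to the choice of learning rate, one further (optional) experiment is to train with a few different rates and overlay the learning curves; the rates below are arbitrary choices:

```python
learning_rates = [0.01, 0.001, 0.0001]  # arbitrary rates to compare
models = {}
for lr in learning_rates:
    print("learning rate: " + str(lr))
    models[str(lr)] = model(train_set_x, train_set_y, test_set_x, test_set_y,
                            num_iterations = 1500, learning_rate = lr, print_cost = False)

for lr in learning_rates:
    plt.plot(np.squeeze(models[str(lr)]["costs"]), label = str(lr))

plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.legend(loc = 'upper right')
plt.show()
```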