TensorFlow Notes — Neural Network Image Recognition (4): Building a Modular Neural Network Template (Regularization, Exponentially Decaying Learning Rate, Moving Average, and Other Optimizations)

A hands-on example:

The input data X = [x0, x1] are random points drawn from a normal distribution.

Labels Y_: when x0*x0 + x1*x1 < 2, y_ = 1 (red); otherwise y_ = 0 (blue).
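To make the labeling rule concrete, here is a tiny plain-Python sketch (the two points are hand-picked for illustration, not drawn from the dataset):

# y_ = 1 (red) if the point lies inside the circle x0^2 + x1^2 = 2, else y_ = 0 (blue)
for x0, x1 in [(1.0, 0.5), (1.5, 1.5)]:
    print((x0, x1), '->', int(x0*x0 + x1*x1 < 2))
# (1.0, 0.5) -> 1   (1.25 < 2, inside: red)
# (1.5, 1.5) -> 0   (4.5 >= 2, outside: blue)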

 

Create three .py files:

1. generateds.py — generates the dataset

#coding:utf-8
import numpy as np
import matplotlib.pyplot as plt
seed = 2

def generateds():
    # Seed the random number generator so the dataset is reproducible
    rdm = np.random.RandomState(seed)
    # Return a 300x2 matrix: 300 coordinate pairs (x0, x1) used as the input dataset
    X = rdm.randn(300, 2)
    # Label each point: if the sum of squares of its two coordinates is < 2, y = 1, otherwise y = 0
    # These labels are the "correct answers" for the input dataset
    Y_ = [int(x0*x0 + x1*x1 < 2) for (x0, x1) in X]
    # For easy visualization, map each element of Y_ to a color: 1 is red, 0 is blue
    Y_c = [['red' if y else 'blue'] for y in Y_]
    # Reshape the dataset X and labels Y_; -1 means "infer n", so n rows by 2 columns is written reshape(-1, 2)
    X = np.vstack(X).reshape(-1, 2)
    Y_ = np.vstack(Y_).reshape(-1, 1)
    #print(X)
    #print(Y_)
    #print(Y_c)
    return X, Y_, Y_c

'''
if __name__ == '__main__':
    X, Y_, Y_c = generateds()
    # Plot the points (x0, x1) in X with plt.scatter; Y_c supplies the colors
    plt.scatter(X[:,0], X[:,1], c=np.squeeze(Y_c))
    plt.show()
'''
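As a quick sanity check (this snippet is my own, not part of the course code), you can confirm the shapes generateds() returns. Since x0² + x1² for a standard 2-D normal follows a chi-squared distribution with 2 degrees of freedom, roughly 1 − e⁻¹ ≈ 63% of the labels should be 1:

import generateds

X, Y_, Y_c = generateds.generateds()
print(X.shape)    # (300, 2): 300 points, 2 coordinates each
print(Y_.shape)   # (300, 1): one 0/1 label per point
print(Y_.mean())  # fraction of red points; should come out near 0.63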

2. forward.py — forward propagation

#coding:utf-8
import tensorflow as tf

# Define the network's inputs, parameters, and outputs, and the forward propagation process
def get_weight(shape, regularizer):
    w = tf.Variable(tf.random_normal(shape), dtype=tf.float32)
    # Add each weight's regularization loss to the total-loss collection 'losses'
    tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w

def get_bias(shape):
    b = tf.Variable(tf.constant(0.01, shape=shape))
    return b

# Build the forward propagation skeleton
def forward(x, regularizer):
    w1 = get_weight([2, 11], regularizer)
    b1 = get_bias([11])
    # (x matrix-multiplied by w1, plus b1), passed through a nonlinear activation function
    y1 = tf.nn.relu(tf.matmul(x, w1) + b1)
    w2 = get_weight([11, 1], regularizer)
    b2 = get_bias([1])
    # The output layer is not passed through an activation function
    y = tf.matmul(y1, w2) + b2
    return y
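The one subtle line here is the regularizer: tf.contrib.layers.l2_regularizer(regularizer)(w) returns a scalar equal to regularizer * sum(w²)/2, and stashing it in the 'losses' collection is what lets backward.py assemble the total loss without knowing how many layers forward.py built. A minimal sketch of that mechanism (toy values, my own example):

import tensorflow as tf

w = tf.Variable([[1.0, 2.0]])  # toy weight matrix
# l2_regularizer(0.01)(w) = 0.01 * (1^2 + 2^2) / 2 = 0.025
tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(0.01)(w))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # tf.add_n sums every tensor registered in the 'losses' collection
    print(sess.run(tf.add_n(tf.get_collection('losses'))))  # prints ~0.025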

3. backward.py — backpropagation (training)

#coding:utf-8
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import generateds
import forward

STEPS = 40000               # total number of training steps
BATCH_SIZE = 30             # how many examples to feed the NN per step
LEARNING_RATE_BASE = 0.001  # initial learning rate
LEARNING_RATE_DECAY = 0.999 # learning-rate decay rate
REGULARIZER = 0.01          # weight of the w losses within the total loss, i.e. the regularization weight

def backward():
    x = tf.placeholder(tf.float32, (None, 2))
    y_ = tf.placeholder(tf.float32, (None, 1))
    X, Y_, Y_c = generateds.generateds()
    y = forward.forward(x, REGULARIZER)
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        300 / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)
    # Define the loss function: mean squared error ...
    loss_mse = tf.reduce_mean(tf.square(y - y_))
    # ... plus the regularization losses collected in 'losses'
    loss_total = loss_mse + tf.add_n(tf.get_collection('losses'))
    # Define the training method, with regularization included; global_step must be
    # passed to minimize so it increments each step and the learning rate actually decays
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss_total, global_step=global_step)
    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        for i in range(STEPS):
            start = (i * BATCH_SIZE) % 300
            end = start + BATCH_SIZE
            sess.run(train_step, feed_dict={x: X[start:end], y_: Y_[start:end]})
            if i % 2000 == 0:
                loss_v = sess.run(loss_total, feed_dict={x: X, y_: Y_})
                print('After %d steps, loss is %f' % (i, loss_v))
        # Generate 2-D grid points: xx and yy each run from -3 to 3 in steps of 0.01
        xx, yy = np.mgrid[-3:3:.01, -3:3:.01]
        # Flatten xx and yy and merge them into one 2-column matrix of grid coordinates
        grid = np.c_[xx.ravel(), yy.ravel()]
        # Feed the grid points into the neural network; probs is the output
        probs = sess.run(y, feed_dict={x: grid})
        # Reshape probs to match the shape of xx
        probs = probs.reshape(xx.shape)
        # Plot the discrete data points
        plt.scatter(X[:,0], X[:,1], c=np.squeeze(Y_c))
        # Plot the probs = 0.5 contour (the learned decision boundary)
        plt.contour(xx, yy, probs, levels=[.5])
        plt.show()

if __name__ == '__main__':
    backward()
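With staircase=True, tf.train.exponential_decay computes learning_rate = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step // decay_steps), and decay_steps = 300 / BATCH_SIZE = 10 here, so the rate drops once per pass over the 300-sample dataset. A quick plain-Python sketch of the resulting schedule (the printed values are my own arithmetic, approximate):

LEARNING_RATE_BASE = 0.001
LEARNING_RATE_DECAY = 0.999
DECAY_STEPS = 300 // 30  # decay once per epoch over the 300-sample dataset

for step in (0, 10, 2000, 40000):
    print(step, LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (step // DECAY_STEPS))
# 0     -> 0.001
# 10    -> 0.000999
# 2000  -> ~0.000819
# 40000 -> ~0.0000183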

 

 

 

Output: a scatter plot of the dataset with the fitted probs = 0.5 contour (the learned decision boundary).

If this post helped you, a tip is appreciated!
