# 1. Building the Neural Network
```python
import tensorflow as tf
import numpy as np

BATCH_SIZE = 8
SEED = 23455

rdm = np.random.RandomState(SEED)
X = rdm.rand(32, 2)
# Labels: sum of the two features plus small random noise
Y = [[x1 + x2 + (rdm.rand() / 10.0 - 0.05)] for (x1, x2) in X]

# None lets any number of samples be fed in at once
x = tf.placeholder(tf.float32, shape=[None, 2])
y = tf.placeholder(tf.float32, shape=[None, 1])
w1 = tf.Variable(tf.random_normal([2, 1], seed=1))
result = tf.matmul(x, w1)
```
# 2. Activation Functions (turning linear into nonlinear)
This example uses no activation function; an activation would be applied to the computed y value, as in the sketch after the following list.
1. ReLU: `tf.nn.relu()`
2. Sigmoid: `tf.nn.sigmoid()`
3. Tanh: `tf.nn.tanh()`
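A minimal sketch, reusing `x` and `w1` from section 1; any of the three functions above can be swapped in the same way:

```python
# Apply ReLU to the linear layer's output;
# tf.nn.sigmoid or tf.nn.tanh would be used identically.
result = tf.nn.relu(tf.matmul(x, w1))
```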
# 3. Loss Functions
1. Mean squared error (MSE)

```python
loss_mse = tf.reduce_mean(tf.square(y - result))
```
2. Custom loss

```python
# Example: under-predicting loses profit (9 per unit) while
# over-predicting only wastes cost (1 per unit), so the two
# error directions are weighted differently.
PROFIT = 9
COST = 1
loss = tf.reduce_sum(tf.where(tf.greater(result, y),
                              (result - y) * COST,
                              (y - result) * PROFIT))
```
3. Cross entropy (measures the distance between two probability distributions)
```python
# y_ is the ground truth, y is the prediction
ce = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-12, 1.0)))
```

In practice the logits are first passed through softmax; TensorFlow fuses the two steps:

```python
ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=y, labels=tf.argmax(y_, 1))
cem = tf.reduce_mean(ce)
```
# 4. Learning Rate
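A minimal sketch, assuming the exponentially decaying schedule common in TF 1.x; all constants here are illustrative, not from the original code:

```python
# Illustrative constants, not from the original notes
LEARNING_RATE_BASE = 0.1    # initial learning rate
LEARNING_RATE_DECAY = 0.99  # multiplicative decay factor
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(
    LEARNING_RATE_BASE, global_step,
    decay_steps=100, decay_rate=LEARNING_RATE_DECAY,
    staircase=True)  # staircase=True decays in discrete steps
```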
# 5. Backpropagation (reducing the loss)
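A minimal training sketch, assuming the MSE loss from section 3 and gradient descent as the optimizer; the fixed rate 0.001, the step count, and the commented alternatives are illustrative choices:

```python
# 0.001 is an illustrative fixed rate; the decaying learning_rate
# from section 4 could be passed instead (with global_step).
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss_mse)
# Alternatives:
# train_step = tf.train.MomentumOptimizer(0.001, 0.9).minimize(loss_mse)
# train_step = tf.train.AdamOptimizer(0.001).minimize(loss_mse)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(3000):  # step count is illustrative
        start = (i * BATCH_SIZE) % 32
        end = start + BATCH_SIZE
        sess.run(train_step, feed_dict={x: X[start:end], y: Y[start:end]})
```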
# Parameter Optimization
1. Moving average (to optimize W and b); see the sketch after this list
2. Regularization (also sketched below)
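A minimal sketch of the moving average, assuming `tf.train.ExponentialMovingAverage` with an illustrative decay; `global_step` and `train_step` are the hypothetical names from the earlier sketches:

```python
# Illustrative decay; shadow copies of all trainable variables
# (here W and b) are updated after every training step.
MOVING_AVERAGE_DECAY = 0.99
ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
ema_op = ema.apply(tf.trainable_variables())
with tf.control_dependencies([train_step, ema_op]):
    train_op = tf.no_op(name='train')
```

And a sketch of L2 regularization on `w1`, folding the weight penalty into the loss; `REGULARIZER` is an illustrative coefficient:

```python
REGULARIZER = 0.0001  # illustrative regularization strength
tf.add_to_collection(
    'losses', tf.contrib.layers.l2_regularizer(REGULARIZER)(w1))
# Total loss = data loss + weight penalties
loss_total = loss_mse + tf.add_n(tf.get_collection('losses'))
```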