A Simple Neural Network in TensorFlow

Notes from learning TensorFlow with Python

import tensorflow as tf

# Define the variables
# tf.Variable creates a tensor of the given shape
w1= tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2= tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
with tf.Session() as sess:
    # Running an initializer op assigns the variable's initial value; the op
    # itself produces no output, which is why sess.run returns None.
    print(sess.run(w1.initializer))
    print(sess.run(w2.initializer))
#None
#None
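# Equivalent shortcut: tf.global_variables_initializer() initializes every
# variable in the graph with a single op (the later examples use this form).
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())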



# Print the tensors to inspect their shape, dtype and other metadata
print(w1)
print(w2)
#<tf.Variable 'Variable:0' shape=(2, 3) dtype=float32_ref>
#<tf.Variable 'Variable_1:0' shape=(3, 1) dtype=float32_ref>

# tf.constant is an operation whose result is a tensor, stored here in x
x = tf.constant([[0.7, 0.9]])
print(x)
#Tensor("Const:0", shape=(1, 2), dtype=float32)
with tf.Session() as sess:
    print(sess.run(x))
#[[ 0.69999999  0.89999998]]
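# Note: sess.run returns a plain NumPy ndarray, so evaluated tensors can be
# used directly with NumPy code.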


# Define the forward propagation of the network
# tf.matmul performs matrix multiplication
a = tf.matmul(x, w1)   # x shape=(1, 2)   w1 shape=(2, 3)

print(a)
#Tensor("MatMul:0", shape=(1, 3), dtype=float32)

y = tf.matmul(a, w2)  #a shape=(1, 3)   w2 shape=(3, 1)
print(y)
#Tensor("MatMul_1:0", shape=(1, 1), dtype=float32)


# Run a session to evaluate the results
with tf.Session() as sess:
    sess.run(w1.initializer)
    sess.run(w2.initializer)
    print(sess.run(a))
    #[[-2.76635647  1.12854266  0.57783246]]
    print(sess.run(y))
    #[[ 3.95757794]]

# placeholder: declare an input slot whose value is supplied at run time
x = tf.placeholder(tf.float32, shape=(1, 2), name="input")
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)
sess = tf.Session()
init_op = tf.global_variables_initializer()
sess.run(init_op)

print(sess.run(y, feed_dict={x: [[0.8, 0.9]]}))
#[[ 4.2442317]]
sess.close()
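# A placeholder holds no value of its own: every sess.run that depends on it
# must be given one through feed_dict, otherwise TensorFlow raises an error.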
x = tf.placeholder(tf.float32, shape=(3, 2), name="input")
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)

sess = tf.Session()
# Use tf.global_variables_initializer() to initialize all the variables at once
init_op = tf.global_variables_initializer()
sess.run(init_op)

print(sess.run(y, feed_dict={x: [[0.7, 0.9], [0.1, 0.4], [0.5, 0.8]]}))
sess.close()

'''
[[ 3.95757794]
 [ 1.15376544]
 [ 3.16749239]]
'''
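Fixing the first dimension of the placeholder ties the graph to one exact batch size. A common pattern, and the one used in the full program below, is to declare that dimension as None so the same graph accepts batches of any size; a minimal sketch reusing w1 and w2 from above:

x = tf.placeholder(tf.float32, shape=(None, 2), name="input")
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # The same graph now serves a batch of 1 and a batch of 3.
    print(sess.run(y, feed_dict={x: [[0.8, 0.9]]}))
    print(sess.run(y, feed_dict={x: [[0.7, 0.9], [0.1, 0.4], [0.5, 0.8]]}))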

The complete neural network training program

import tensorflow as tf
from numpy.random import RandomState
# Define the network parameters and the input/output placeholders
batch_size = 8
# Weights drawn from a normal distribution with mean 0 and stddev 1; seeded for reproducibility
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
# First dimension is None, so the batch size is inferred from the data fed in
x = tf.placeholder(tf.float32, shape=(None, 2))
y_ = tf.placeholder(tf.float32, shape=(None, 1))

# Define forward propagation, the loss function, and the backpropagation (training) step

a = tf.matmul(x, w1)
y = tf.matmul(a, w2)
# Loss: cross entropy between predictions y and labels y_
# Optimizer: Adam with learning rate 0.001
cross_entropy = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cross_entropy)
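# Cross entropy here is H(y_, y) = -mean(y_ * log(y)); tf.clip_by_value bounds
# y to [1e-10, 1.0] before the log, so a prediction of exactly 0 can never
# turn the loss into -inf or NaN.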

rdm = RandomState(1)
# Generate 128 random samples of shape 128x2
X = rdm.rand(128, 2)

# Labels are synthetic: a sample gets label 1 when x1 + x2 < 1, otherwise 0
Y = [[int(x1 + x2 < 1)] for (x1, x2) in X]

# Create a session and run the computation graph
# Initialize all variables globally
STEPS = 5000
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    # Print the parameter values before training.
    print("w1:", sess.run(w1))
    print("w2:", sess.run(w2))
    print("\n")
    for i in range(STEPS):
        # Slide a window over the 128 samples, batch_size examples at a time
        # (batch_size divides 128, so end = start + batch_size never overruns)
        start = (i * batch_size) % 128
        end = start + batch_size
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
        if i % 1000 == 0:
            total_cross_entropy = sess.run(cross_entropy, feed_dict={x: X, y_: Y})
            print("After %d training step(s), cross entropy on all data is %g" % (i, total_cross_entropy))
    # Print the parameter values after training.
    print("\n")
    print("w1:", sess.run(w1))
    print("w2:", sess.run(w2))

'''

w1: [[-0.81131822  1.48459876  0.06532937]
 [-2.4427042   0.0992484   0.59122431]]
w2: [[-0.81131822]
 [ 1.48459876]
 [ 0.06532937]]


After 0 training step(s), cross entropy on all data is 0.0674925
After 1000 training step(s), cross entropy on all data is 0.0163385
After 2000 training step(s), cross entropy on all data is 0.00907547
After 3000 training step(s), cross entropy on all data is 0.00714436
After 4000 training step(s), cross entropy on all data is 0.00578471


w1: [[-1.96182752  2.58235407  1.68203771]
 [-3.46817183  1.06982315  2.11788988]]
w2: [[-1.82471502]
 [ 2.68546653]
 [ 1.41819501]]

Process finished with exit code 0
'''
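A note on versions: everything above uses the TensorFlow 1.x graph-and-session API (tf.Session, tf.placeholder, tf.train.AdamOptimizer). On a TensorFlow 2.x installation the same scripts should run through the v1 compatibility layer; a minimal sketch:

# Run TF1.x-style graph code on TensorFlow 2.x via the compat layer.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()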